@@ -139,6 +139,7 @@ struct submit_info {
 	int directed;
 	void *token;
 	void *data;
+	int flags;
 	int len;
 };
 
@@ -184,6 +185,8 @@ static struct cpdma_control_info controls[] = {
 		 (directed << CPDMA_TO_PORT_SHIFT));	\
 	} while (0)
 
+#define CPDMA_DMA_EXT_MAP		BIT(16)
+
 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
 {
 	struct cpdma_desc_pool *pool = ctlr->pool;
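
CPDMA_DMA_EXT_MAP is a software-only marker: it is never programmed into a hardware field, but is OR'ed into the descriptor's sw_len word on submit and stripped again on completion. Bit 16 is free for this because the buffer lengths cpdma handles stay well below 64 KiB. A minimal standalone illustration of the pack/unpack idiom (not part of the patch; plain C with hypothetical values):

    #include <stdint.h>
    #include <stdio.h>

    #define CPDMA_DMA_EXT_MAP (1u << 16)    /* mirrors BIT(16) from the patch */

    int main(void)
    {
            uint32_t len = 1514;                        /* a typical frame length */
            uint32_t swlen = len | CPDMA_DMA_EXT_MAP;   /* what the submit path stores in sw_len */

            /* completion side: recover the flag and the original length */
            int premapped = !!(swlen & CPDMA_DMA_EXT_MAP);
            uint32_t origlen = swlen & ~CPDMA_DMA_EXT_MAP;

            printf("premapped=%d origlen=%u\n", premapped, origlen);
            return 0;
    }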
@@ -1015,6 +1018,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
 	struct cpdma_chan *chan = si->chan;
 	struct cpdma_ctlr *ctlr = chan->ctlr;
 	int len = si->len;
+	int swlen = len;
 	struct cpdma_desc __iomem *desc;
 	dma_addr_t buffer;
 	u32 mode;
@@ -1036,16 +1040,22 @@ static int cpdma_chan_submit_si(struct submit_info *si)
 		chan->stats.runt_transmit_buff++;
 	}
 
-	buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
-	ret = dma_mapping_error(ctlr->dev, buffer);
-	if (ret) {
-		cpdma_desc_free(ctlr->pool, desc, 1);
-		return -EINVAL;
-	}
-
 	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
 	cpdma_desc_to_port(chan, mode, si->directed);
 
+	if (si->flags & CPDMA_DMA_EXT_MAP) {
+		buffer = (dma_addr_t)si->data;
+		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+		swlen |= CPDMA_DMA_EXT_MAP;
+	} else {
+		buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
+		ret = dma_mapping_error(ctlr->dev, buffer);
+		if (ret) {
+			cpdma_desc_free(ctlr->pool, desc, 1);
+			return -EINVAL;
+		}
+	}
+
 	/* Relaxed IO accessors can be used here as there is read barrier
 	 * at the end of write sequence.
 	 */
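
The submit path now has two cases. With CPDMA_DMA_EXT_MAP set, si->data does not point at kernel memory at all; it carries an already-valid dma_addr_t smuggled through the void * field, so only a cache sync toward the device is needed. Otherwise the old dma_map_single() path is kept unchanged. The round-trip through void * relies on a pointer being at least as wide as dma_addr_t, which holds on the 32-bit TI platforms this driver targets; a standalone sketch of that assumption:

    #include <stdint.h>
    #include <assert.h>

    typedef uint32_t dma_addr_t;   /* assumed 32-bit, as on the TI SoCs using cpdma */

    int main(void)
    {
            dma_addr_t dma = 0x8f000000u;

            void *cookie = (void *)(uintptr_t)dma;            /* si.data = (void *)data */
            dma_addr_t back = (dma_addr_t)(uintptr_t)cookie;  /* buffer = (dma_addr_t)si->data */

            assert(back == dma);   /* the address survives the detour through void * */
            return 0;
    }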
@@ -1055,7 +1065,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
 	writel_relaxed(mode | len, &desc->hw_mode);
 	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
 	writel_relaxed(buffer, &desc->sw_buffer);
-	writel_relaxed(len, &desc->sw_len);
+	writel_relaxed(swlen, &desc->sw_len);
 	desc_read(desc, sw_len);
 
 	__cpdma_chan_submit(chan, desc);
@@ -1079,6 +1089,32 @@ int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
 	si.data = data;
 	si.len = len;
 	si.directed = directed;
+	si.flags = 0;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state == CPDMA_STATE_TEARDOWN) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+
+	ret = cpdma_chan_submit_si(&si);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return ret;
+}
+
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+				  dma_addr_t data, int len, int directed)
+{
+	struct submit_info si;
+	unsigned long flags;
+	int ret;
+
+	si.chan = chan;
+	si.token = token;
+	si.data = (void *)data;
+	si.len = len;
+	si.directed = directed;
+	si.flags = CPDMA_DMA_EXT_MAP;
 
 	spin_lock_irqsave(&chan->lock, flags);
 	if (chan->state == CPDMA_STATE_TEARDOWN) {
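
cpdma_chan_idle_submit_mapped() mirrors cpdma_chan_idle_submit(): it may queue descriptors while the channel is idle (for example when pre-filling an rx ring during device open, before the channel is started) and refuses only a channel in teardown. A hedged sketch of that setup-time use; rxch, rx_bufs, rx_dma, ring_size and buf_len are hypothetical caller state:

    /* Sketch (not from the patch): pre-fill the rx ring with
     * already-mapped buffers, then start the channel.
     */
    for (i = 0; i < ring_size; i++) {
            ret = cpdma_chan_idle_submit_mapped(rxch, rx_bufs[i],
                                                rx_dma[i], buf_len, 0);
            if (ret < 0)
                    break;
    }
    cpdma_chan_start(rxch);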
@@ -1103,6 +1139,32 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 	si.data = data;
 	si.len = len;
 	si.directed = directed;
+	si.flags = 0;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state != CPDMA_STATE_ACTIVE) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+
+	ret = cpdma_chan_submit_si(&si);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return ret;
+}
+
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+			     dma_addr_t data, int len, int directed)
+{
+	struct submit_info si;
+	unsigned long flags;
+	int ret;
+
+	si.chan = chan;
+	si.token = token;
+	si.data = (void *)data;
+	si.len = len;
+	si.directed = directed;
+	si.flags = CPDMA_DMA_EXT_MAP;
 
 	spin_lock_irqsave(&chan->lock, flags);
 	if (chan->state != CPDMA_STATE_ACTIVE) {
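
The *_mapped() variants shift ownership of the DMA mapping to the caller: cpdma no longer maps on submit or unmaps on completion, it only performs per-packet cache maintenance. That suits buffers that are mapped once and recycled many times. A hedged usage sketch; dev, chan, buf, BUF_LEN and pkt_len are assumed to exist in the caller:

    /* Sketch: map once, submit many times.  The caller keeps the
     * mapping alive and unmaps it itself when the buffer is retired.
     */
    dma_addr_t dma = dma_map_single(dev, buf, BUF_LEN, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, dma))
            return -ENOMEM;

    ret = cpdma_chan_submit_mapped(chan, buf, dma, pkt_len, 0);

    /* ... much later, when freeing the buffer for good: */
    dma_unmap_single(dev, dma, BUF_LEN, DMA_BIDIRECTIONAL);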
@@ -1140,10 +1202,17 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
 	uintptr_t token;
 
 	token = desc_read(desc, sw_token);
-	buff_dma = desc_read(desc, sw_buffer);
 	origlen = desc_read(desc, sw_len);
 
-	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+	buff_dma = desc_read(desc, sw_buffer);
+	if (origlen & CPDMA_DMA_EXT_MAP) {
+		origlen &= ~CPDMA_DMA_EXT_MAP;
+		dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
+					chan->dir);
+	} else {
+		dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+	}
+
 	cpdma_desc_free(pool, desc, 1);
 	(*chan->handler)((void *)token, outlen, status);
 }
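
On completion, __cpdma_chan_free() reads sw_len back and lets the flag pick between a sync back to the CPU and a full unmap; the handler sees the same token and length as before, so pre-mapped buffers come back still mapped and ready to resubmit. A hedged sketch of a completion handler built on that property; rx_buf, deliver() and the b-> fields are hypothetical:

    /* Sketch: a cpdma completion handler that resubmits a still-mapped
     * buffer.  The (token, len, status) signature is cpdma's own
     * cpdma_handler_fn; everything else here is assumed.
     */
    static void rx_handler(void *token, int len, int status)
    {
            struct rx_buf *b = token;

            if (status >= 0)
                    deliver(b->vaddr, len);   /* assumed consumer */

            /* no dma_map_single() needed: the mapping survived completion */
            cpdma_chan_submit_mapped(b->chan, b, b->dma, b->size, 0);
    }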