@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
 	struct dma_device	dma_dev;
 	bool			m2m;
 	int			(*hw_setup)(struct ep93xx_dma_chan *);
+	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
 	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
 	void			(*hw_submit)(struct ep93xx_dma_chan *);
 	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -333,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
 	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }
 
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+	unsigned long flags;
 	u32 control;
 
+	spin_lock_irqsave(&edmac->lock, flags);
 	control = readl(edmac->regs + M2P_CONTROL);
 	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 	m2p_set_control(edmac, control);
+	spin_unlock_irqrestore(&edmac->lock, flags);
 
 	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-		cpu_relax();
+		schedule();
+}
 
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
 	m2p_set_control(edmac, 0);
 
-	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-		cpu_relax();
+	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
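For context (not part of this patch): the drain loop moves out of m2p_hw_shutdown() because hw_shutdown() runs on the terminate path, typically with the channel lock held, where only busy-waiting is possible, while the new hw_synchronize() hook is reached from process context and may give up the CPU while polling. A minimal alternative sketch of the same wait, assuming only the m2p_channel_state() helper shown above (the patch itself simply calls schedule()):

	#include <linux/delay.h>

	/*
	 * Illustration only: a sleeping poll is legal on the synchronize path,
	 * unlike in hw_shutdown(), which may run in atomic context.
	 */
	static void m2p_wait_until_idle(struct ep93xx_dma_chan *edmac)
	{
		while (m2p_channel_state(edmac) >= M2P_STATE_ON)
			usleep_range(100, 200);
	}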
@@ -1162,6 +1169,26 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 	return NULL;
 }
 
+/**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+	if (edmac->edma->hw_synchronize)
+		edmac->edma->hw_synchronize(edmac);
+}
+
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
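The new callback backs the generic dmaengine_synchronize() helper, which clients call after dmaengine_terminate_async() (or indirectly through dmaengine_terminate_sync()). A hypothetical client-side teardown sketch; the function name, channel and buffer are illustrative and not taken from this driver:

	#include <linux/dmaengine.h>
	#include <linux/vmalloc.h>

	/*
	 * Hypothetical client teardown. Must run in process context because
	 * dmaengine_synchronize() may sleep.
	 */
	static void example_client_stop(struct dma_chan *chan, void *bounce_buf)
	{
		/* Ask the driver to abort all issued descriptors. */
		dmaengine_terminate_async(chan);

		/*
		 * Wait until every descriptor and its complete callback has
		 * finished; for ep93xx M2P channels this ends up in
		 * m2p_hw_synchronize() via ep93xx_dma_synchronize(). Only then
		 * is it safe to free memory the callbacks might still touch.
		 */
		dmaengine_synchronize(chan);

		vfree(bounce_buf);
	}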
@@ -1325,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
 	dma_dev->device_config = ep93xx_dma_slave_config;
+	dma_dev->device_synchronize = ep93xx_dma_synchronize;
 	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
 	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
 	dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1342,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 	} else {
 		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
 
+		edma->hw_synchronize = m2p_hw_synchronize;
 		edma->hw_setup = m2p_hw_setup;
 		edma->hw_shutdown = m2p_hw_shutdown;
 		edma->hw_submit = m2p_hw_submit;
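Only the M2P branch installs hw_synchronize; M2M (memcpy) channels leave the hook NULL, so ep93xx_dma_synchronize() above is a harmless no-op for them. Clients that want termination and synchronization in one call can use dmaengine_terminate_sync(), which behaves roughly like the sketch below (simplified; the helper name is made up for illustration):

	/* Simplified sketch of terminate-then-synchronize in one call. */
	static inline int example_terminate_sync(struct dma_chan *chan)
	{
		int ret = dmaengine_terminate_async(chan);

		if (ret)
			return ret;

		dmaengine_synchronize(chan);
		return 0;
	}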