@@ -244,18 +244,18 @@ struct lpuart_port {
 	struct dma_chan		*dma_rx_chan;
 	struct dma_async_tx_descriptor  *dma_tx_desc;
 	struct dma_async_tx_descriptor  *dma_rx_desc;
-	dma_addr_t		dma_tx_buf_bus;
 	dma_cookie_t		dma_tx_cookie;
 	dma_cookie_t		dma_rx_cookie;
-	unsigned char		*dma_tx_buf_virt;
 	unsigned int		dma_tx_bytes;
 	unsigned int		dma_rx_bytes;
-	int			dma_tx_in_progress;
+	bool			dma_tx_in_progress;
 	unsigned int		dma_rx_timeout;
 	struct timer_list	lpuart_timer;
-	struct scatterlist	rx_sgl;
+	struct scatterlist	rx_sgl, tx_sgl[2];
 	struct circ_buf		rx_ring;
 	int			rx_dma_rng_buf_len;
+	unsigned int		dma_tx_nents;
+	wait_queue_head_t	dma_wait;
 };

 static const struct of_device_id lpuart_dt_ids[] = {
@@ -316,103 +316,118 @@ static void lpuart32_stop_rx(struct uart_port *port)
 	lpuart32_write(temp & ~UARTCTRL_RE, port->membase + UARTCTRL);
 }

-static void lpuart_pio_tx(struct lpuart_port *sport)
+static void lpuart_dma_tx(struct lpuart_port *sport)
 {
 	struct circ_buf *xmit = &sport->port.state->xmit;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sport->port.lock, flags);
+	struct scatterlist *sgl = sport->tx_sgl;
+	struct device *dev = sport->port.dev;
+	int ret;

-	while (!uart_circ_empty(xmit) &&
-		readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size) {
-		writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR);
-		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-		sport->port.icount.tx++;
-	}
+	if (sport->dma_tx_in_progress)
+		return;

-	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
-		uart_write_wakeup(&sport->port);
+	sport->dma_tx_bytes = uart_circ_chars_pending(xmit);

-	if (uart_circ_empty(xmit))
-		writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_TDMAS,
-			sport->port.membase + UARTCR5);
+	if (xmit->tail < xmit->head) {
+		sport->dma_tx_nents = 1;
+		sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
+	} else {
+		sport->dma_tx_nents = 2;
+		sg_init_table(sgl, 2);
+		sg_set_buf(sgl, xmit->buf + xmit->tail,
+				UART_XMIT_SIZE - xmit->tail);
+		sg_set_buf(sgl + 1, xmit->buf, xmit->head);
+	}

-	spin_unlock_irqrestore(&sport->port.lock, flags);
-}
+	ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+	if (!ret) {
+		dev_err(dev, "DMA mapping error for TX.\n");
+		return;
+	}

-static int lpuart_dma_tx(struct lpuart_port *sport, unsigned long count)
-{
-	struct circ_buf *xmit = &sport->port.state->xmit;
-	dma_addr_t tx_bus_addr;
-
-	dma_sync_single_for_device(sport->port.dev, sport->dma_tx_buf_bus,
-				UART_XMIT_SIZE, DMA_TO_DEVICE);
-	sport->dma_tx_bytes = count & ~(sport->txfifo_size - 1);
-	tx_bus_addr = sport->dma_tx_buf_bus + xmit->tail;
-	sport->dma_tx_desc = dmaengine_prep_slave_single(sport->dma_tx_chan,
-					tx_bus_addr, sport->dma_tx_bytes,
+	sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
+					sport->dma_tx_nents,
 					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
-
 	if (!sport->dma_tx_desc) {
-		dev_err(sport->port.dev, "Not able to get desc for tx\n");
-		return -EIO;
+		dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+		dev_err(dev, "Cannot prepare TX slave DMA!\n");
+		return;
 	}

 	sport->dma_tx_desc->callback = lpuart_dma_tx_complete;
 	sport->dma_tx_desc->callback_param = sport;
-	sport->dma_tx_in_progress = 1;
+	sport->dma_tx_in_progress = true;
 	sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
 	dma_async_issue_pending(sport->dma_tx_chan);

-	return 0;
-}
-
-static void lpuart_prepare_tx(struct lpuart_port *sport)
-{
-	struct circ_buf *xmit = &sport->port.state->xmit;
-	unsigned long count = CIRC_CNT_TO_END(xmit->head,
-					xmit->tail, UART_XMIT_SIZE);
-
-	if (!count)
-		return;
-
-	if (count < sport->txfifo_size)
-		writeb(readb(sport->port.membase + UARTCR5) & ~UARTCR5_TDMAS,
-			sport->port.membase + UARTCR5);
-	else {
-		writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_TDMAS,
-			sport->port.membase + UARTCR5);
-		lpuart_dma_tx(sport, count);
-	}
 }

 static void lpuart_dma_tx_complete(void *arg)
 {
 	struct lpuart_port *sport = arg;
+	struct scatterlist *sgl = &sport->tx_sgl[0];
 	struct circ_buf *xmit = &sport->port.state->xmit;
 	unsigned long flags;

-	async_tx_ack(sport->dma_tx_desc);
-
 	spin_lock_irqsave(&sport->port.lock, flags);

+	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+
 	xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);
-	sport->dma_tx_in_progress = 0;
+
+	sport->port.icount.tx += sport->dma_tx_bytes;
+	sport->dma_tx_in_progress = false;
+	spin_unlock_irqrestore(&sport->port.lock, flags);

 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(&sport->port);

-	lpuart_prepare_tx(sport);
+	if (waitqueue_active(&sport->dma_wait)) {
+		wake_up(&sport->dma_wait);
+		return;
+	}
+
+	spin_lock_irqsave(&sport->port.lock, flags);
+
+	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
+		lpuart_dma_tx(sport);

 	spin_unlock_irqrestore(&sport->port.lock, flags);
 }

+static int lpuart_dma_tx_request(struct uart_port *port)
+{
+	struct lpuart_port *sport = container_of(port,
+					struct lpuart_port, port);
+	struct dma_slave_config dma_tx_sconfig = {};
+	int ret;
+
+	dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
+	dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	dma_tx_sconfig.dst_maxburst = 1;
+	dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
+	ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
+
+	if (ret) {
+		dev_err(sport->port.dev,
+			"DMA slave config failed, err = %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static void lpuart_flush_buffer(struct uart_port *port)
 {
 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+
 	if (sport->lpuart_dma_tx_use) {
+		if (sport->dma_tx_in_progress) {
+			dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
+				sport->dma_tx_nents, DMA_TO_DEVICE);
+			sport->dma_tx_in_progress = false;
+		}
 		dmaengine_terminate_all(sport->dma_tx_chan);
-		sport->dma_tx_in_progress = 0;
 	}
 }

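The tail < head test in lpuart_dma_tx() above is the standard circular-buffer split: the pending bytes are contiguous when head is ahead of tail, and wrap past the end of the ring otherwise, in which case they need two scatterlist entries. Below is a minimal userspace sketch of that offset/length arithmetic only — struct seg and tx_ring_segments() are invented for illustration, not driver code:

	#include <stdio.h>

	#define UART_XMIT_SIZE 4096	/* power of two, as in the kernel ring */

	struct seg { unsigned int off, len; };

	/*
	 * Describe the pending bytes of a circular buffer as one or two
	 * (offset, length) segments; callers guarantee the ring is not empty.
	 */
	static int tx_ring_segments(unsigned int head, unsigned int tail,
				    struct seg seg[2])
	{
		unsigned int pending = (head - tail) & (UART_XMIT_SIZE - 1);

		if (tail < head) {		/* linear: one entry suffices */
			seg[0].off = tail;
			seg[0].len = pending;
			return 1;
		}
		seg[0].off = tail;		/* tail up to the end of the ring */
		seg[0].len = UART_XMIT_SIZE - tail;
		seg[1].off = 0;			/* wrapped part from the start */
		seg[1].len = head;
		return 2;
	}

	int main(void)
	{
		struct seg s[2];
		int n = tx_ring_segments(100, 4000, s);	/* wrapped case */

		for (int i = 0; i < n; i++)
			printf("seg %d: off=%u len=%u\n", i, s[i].off, s[i].len);
		return 0;
	}

For head = 100 and tail = 4000 this prints two segments, 96 bytes at the tail and 100 bytes from the start of the ring — exactly the two sg_set_buf() calls in the patch.
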
@@ -469,8 +484,8 @@ static void lpuart_start_tx(struct uart_port *port)
 	writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);

 	if (sport->lpuart_dma_tx_use) {
-		if (!uart_circ_empty(xmit) && !sport->dma_tx_in_progress)
-			lpuart_prepare_tx(sport);
+		if (!uart_circ_empty(xmit) && !uart_tx_stopped(port))
+			lpuart_dma_tx(sport);
 	} else {
 		if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
 			lpuart_transmit_buffer(sport);
@@ -489,6 +504,29 @@ static void lpuart32_start_tx(struct uart_port *port)
 		lpuart32_transmit_buffer(sport);
 }

+/* return TIOCSER_TEMT when transmitter is not busy */
+static unsigned int lpuart_tx_empty(struct uart_port *port)
+{
+	struct lpuart_port *sport = container_of(port,
+			struct lpuart_port, port);
+	unsigned char sr1 = readb(port->membase + UARTSR1);
+	unsigned char sfifo = readb(port->membase + UARTSFIFO);
+
+	if (sport->dma_tx_in_progress)
+		return 0;
+
+	if (sr1 & UARTSR1_TC && sfifo & UARTSFIFO_TXEMPT)
+		return TIOCSER_TEMT;
+
+	return 0;
+}
+
+static unsigned int lpuart32_tx_empty(struct uart_port *port)
+{
+	return (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TC) ?
+		TIOCSER_TEMT : 0;
+}
+
 static irqreturn_t lpuart_txint(int irq, void *dev_id)
 {
 	struct lpuart_port *sport = dev_id;
@@ -662,12 +700,8 @@ static irqreturn_t lpuart_int(int irq, void *dev_id)
 	if (sts & UARTSR1_RDRF)
 		lpuart_rxint(irq, dev_id);

-	if (sts & UARTSR1_TDRE) {
-		if (sport->lpuart_dma_tx_use)
-			lpuart_pio_tx(sport);
-		else
-			lpuart_txint(irq, dev_id);
-	}
+	if (sts & UARTSR1_TDRE)
+		lpuart_txint(irq, dev_id);

 	return IRQ_HANDLED;
 }
@@ -692,29 +726,6 @@ static irqreturn_t lpuart32_int(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

-/* return TIOCSER_TEMT when transmitter is not busy */
-static unsigned int lpuart_tx_empty(struct uart_port *port)
-{
-	struct lpuart_port *sport = container_of(port,
-			struct lpuart_port, port);
-	unsigned char sr1 = readb(port->membase + UARTSR1);
-	unsigned char sfifo = readb(port->membase + UARTSFIFO);
-
-	if (sport->dma_tx_in_progress)
-		return 0;
-
-	if (sr1 & UARTSR1_TC && sfifo & UARTSFIFO_TXEMPT)
-		return TIOCSER_TEMT;
-
-	return 0;
-}
-
-static unsigned int lpuart32_tx_empty(struct uart_port *port)
-{
-	return (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TC) ?
-		TIOCSER_TEMT : 0;
-}
-
 static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
 {
 	struct tty_port *port = &sport->port.state->port;
@@ -890,18 +901,6 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
 	return 0;
 }

-static void lpuart_dma_tx_free(struct uart_port *port)
-{
-	struct lpuart_port *sport = container_of(port,
-					struct lpuart_port, port);
-
-	dma_unmap_single(sport->port.dev, sport->dma_tx_buf_bus,
-			UART_XMIT_SIZE, DMA_TO_DEVICE);
-
-	sport->dma_tx_buf_bus = 0;
-	sport->dma_tx_buf_virt = NULL;
-}
-
 static void lpuart_dma_rx_free(struct uart_port *port)
 {
 	struct lpuart_port *sport = container_of(port,
@@ -1061,44 +1060,6 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
 	lpuart32_write(ctrl_saved, sport->port.membase + UARTCTRL);
 }

-static int lpuart_dma_tx_request(struct uart_port *port)
-{
-	struct lpuart_port *sport = container_of(port,
-					struct lpuart_port, port);
-	struct dma_slave_config dma_tx_sconfig;
-	dma_addr_t dma_bus;
-	unsigned char *dma_buf;
-	int ret;
-
-	dma_bus = dma_map_single(sport->dma_tx_chan->device->dev,
-				sport->port.state->xmit.buf,
-				UART_XMIT_SIZE, DMA_TO_DEVICE);
-
-	if (dma_mapping_error(sport->dma_tx_chan->device->dev, dma_bus)) {
-		dev_err(sport->port.dev, "dma_map_single tx failed\n");
-		return -ENOMEM;
-	}
-
-	dma_buf = sport->port.state->xmit.buf;
-	dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
-	dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-	dma_tx_sconfig.dst_maxburst = sport->txfifo_size;
-	dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
-	ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
-
-	if (ret < 0) {
-		dev_err(sport->port.dev,
-			"Dma slave config failed, err = %d\n", ret);
-		return ret;
-	}
-
-	sport->dma_tx_buf_virt = dma_buf;
-	sport->dma_tx_buf_bus = dma_bus;
-	sport->dma_tx_in_progress = 0;
-
-	return 0;
-}
-
 static void rx_dma_timer_init(struct lpuart_port *sport)
 {
 	setup_timer(&sport->lpuart_timer, lpuart_timer_func,
@@ -1151,6 +1112,7 @@ static int lpuart_startup(struct uart_port *port)
 	}

 	if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
+		init_waitqueue_head(&sport->dma_wait);
 		sport->lpuart_dma_tx_use = true;
 		temp = readb(port->membase + UARTCR5);
 		writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
@@ -1220,8 +1182,15 @@ static void lpuart_shutdown(struct uart_port *port)
 		lpuart_dma_rx_free(&sport->port);
 	}

-	if (sport->lpuart_dma_tx_use)
-		lpuart_dma_tx_free(&sport->port);
+	if (sport->lpuart_dma_tx_use) {
+		if (wait_event_interruptible(sport->dma_wait,
+			!sport->dma_tx_in_progress) != false) {
+			sport->dma_tx_in_progress = false;
+			dmaengine_terminate_all(sport->dma_tx_chan);
+		}
+
+		lpuart_stop_tx(port);
+	}
 }

 static void lpuart32_shutdown(struct uart_port *port)
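
The dma_wait handshake introduced above couples lpuart_dma_tx_complete(), which wakes any sleeper once the descriptor finishes, with lpuart_shutdown(), which sleeps until dma_tx_in_progress clears and only terminates the channel if the wait is interrupted. A rough userspace analogue of that pattern, using a pthread condition variable in place of the kernel waitqueue (all names here are invented for illustration, not driver code):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t dma_wait = PTHREAD_COND_INITIALIZER;	/* ~ sport->dma_wait */
	static bool tx_in_progress = true;	/* ~ sport->dma_tx_in_progress */

	/* Stand-in for lpuart_dma_tx_complete(): clear the flag, wake the waiter. */
	static void *completion_cb(void *arg)
	{
		(void)arg;
		sleep(1);			/* pretend the transfer takes a while */
		pthread_mutex_lock(&lock);
		tx_in_progress = false;
		pthread_cond_signal(&dma_wait);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, completion_cb, NULL);

		/* Stand-in for shutdown's wait_event_interruptible(): sleep
		 * until the in-flight "descriptor" completes before tearing
		 * anything down. */
		pthread_mutex_lock(&lock);
		while (tx_in_progress)
			pthread_cond_wait(&dma_wait, &lock);
		pthread_mutex_unlock(&lock);

		puts("tx drained, safe to shut down");
		pthread_join(t, NULL);
		return 0;
	}

The predicate loop plus mutex mirrors wait_event(): the waiter re-checks the flag under the lock, so a wakeup cannot be lost.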