@@ -99,6 +99,10 @@ uint32_t lpuartdiv_calc(const uint64_t clock_rate, const uint32_t baud_rate)
 #endif /* USART_PRESC_PRESCALER */
 #endif /* HAS_LPUART */

+#ifdef CONFIG_UART_ASYNC_API
+#define STM32_ASYNC_STATUS_TIMEOUT (DMA_STATUS_BLOCK + 1)
+#endif
+
 #ifdef CONFIG_PM
 static void uart_stm32_pm_policy_state_lock_get(const struct device *dev)
 {
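Note: DMA_STATUS_COMPLETE and DMA_STATUS_BLOCK are the status codes the Zephyr DMA framework passes to a dma_callback_t, so DMA_STATUS_BLOCK + 1 gives the driver a private sentinel that cannot collide with anything the DMA driver itself reports. A rough sketch of the values involved (paraphrased from include/zephyr/drivers/dma.h; verify against your tree):

    #define DMA_STATUS_COMPLETE 0   /* DMA transfer (or full cycle) completed */
    #define DMA_STATUS_BLOCK    1   /* DMA block (here: half of the buffer) completed */
    /* driver-private marker for idle-line / RX-timeout flushes */
    #define STM32_ASYNC_STATUS_TIMEOUT (DMA_STATUS_BLOCK + 1)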
@@ -1117,11 +1121,16 @@ static inline void async_evt_rx_rdy(struct uart_stm32_data *data)
                 .data.rx.offset = data->dma_rx.offset
         };

-        /* update the current pos for new data */
-        data->dma_rx.offset = data->dma_rx.counter;
+        /* When cyclic DMA is used, buffer positions are not updated - call callback every time*/
+        if (data->dma_rx.dma_cfg.cyclic == 0) {
+                /* update the current pos for new data */
+                data->dma_rx.offset = data->dma_rx.counter;

-        /* send event only for new data */
-        if (event.data.rx.len > 0) {
+                /* send event only for new data */
+                if (event.data.rx.len > 0) {
+                        async_user_callback(data, &event);
+                }
+        } else {
                 async_user_callback(data, &event);
         }
 }
@@ -1204,20 +1213,45 @@ static inline void async_timer_start(struct k_work_delayable *work,
         }
 }

-static void uart_stm32_dma_rx_flush(const struct device *dev)
+static void uart_stm32_dma_rx_flush(const struct device *dev, int status)
 {
         struct dma_status stat;
         struct uart_stm32_data *data = dev->data;

-        if (dma_get_status(data->dma_rx.dma_dev,
-                           data->dma_rx.dma_channel, &stat) == 0) {
-                size_t rx_rcv_len = data->dma_rx.buffer_length -
-                                    stat.pending_length;
-                if (rx_rcv_len > data->dma_rx.offset) {
-                        data->dma_rx.counter = rx_rcv_len;
+        size_t rx_rcv_len = 0;

-                        async_evt_rx_rdy(data);
+        switch (status) {
+        case DMA_STATUS_COMPLETE:
+                /* fully complete */
+                data->dma_rx.counter = data->dma_rx.buffer_length;
+                break;
+        case DMA_STATUS_BLOCK:
+                /* half complete */
+                data->dma_rx.counter = data->dma_rx.buffer_length / 2;
+
+                break;
+        default: /* likely STM32_ASYNC_STATUS_TIMEOUT */
+                if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) {
+                        rx_rcv_len = data->dma_rx.buffer_length - stat.pending_length;
+                        data->dma_rx.counter = rx_rcv_len;
                 }
+                break;
+        }
+
+        async_evt_rx_rdy(data);
+
+        switch (status) { /* update offset*/
+        case DMA_STATUS_COMPLETE:
+                /* fully complete */
+                data->dma_rx.offset = 0;
+                break;
+        case DMA_STATUS_BLOCK:
+                /* half complete */
+                data->dma_rx.offset = data->dma_rx.buffer_length / 2;
+                break;
+        default: /* likely STM32_ASYNC_STATUS_TIMEOUT */
+                data->dma_rx.offset += rx_rcv_len - data->dma_rx.offset;
+                break;
         }
 }

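To make the bookkeeping above concrete, assume a 200-byte RX buffer in cyclic mode: on DMA_STATUS_BLOCK (half transfer) the counter becomes 100, bytes [offset, 100) are reported and the offset is then moved to 100; on DMA_STATUS_COMPLETE the counter becomes 200, bytes [100, 200) are reported and the offset wraps back to 0 for the next cycle; on STM32_ASYNC_STATUS_TIMEOUT (idle line or RX timeout) the current write position is read back via dma_get_status() and only the bytes received since the last event are reported, after which the offset is advanced to that position.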
@@ -1269,7 +1303,7 @@ static void uart_stm32_isr(const struct device *dev)
                 LOG_DBG("idle interrupt occurred");

                 if (data->dma_rx.timeout == 0) {
-                        uart_stm32_dma_rx_flush(dev);
+                        uart_stm32_dma_rx_flush(dev, STM32_ASYNC_STATUS_TIMEOUT);
                 } else {
                         /* Start the RX timer not null */
                         async_timer_start(&data->dma_rx.timeout_work,
@@ -1417,7 +1451,7 @@ static int uart_stm32_async_rx_disable(const struct device *dev)

         LL_USART_DisableIT_IDLE(usart);

-        uart_stm32_dma_rx_flush(dev);
+        uart_stm32_dma_rx_flush(dev, STM32_ASYNC_STATUS_TIMEOUT);

         async_evt_rx_buf_release(data);

@@ -1517,27 +1551,32 @@ void uart_stm32_dma_rx_cb(const struct device *dma_dev, void *user_data,

         (void)k_work_cancel_delayable(&data->dma_rx.timeout_work);

-        /* true since this functions occurs when buffer if full */
-        data->dma_rx.counter = data->dma_rx.buffer_length;
+        /* If we are in NORMAL MODE */
+        if (data->dma_rx.dma_cfg.cyclic == 0) {

-        async_evt_rx_rdy(data);
-
-        if (data->rx_next_buffer != NULL) {
-                async_evt_rx_buf_release(data);
+                /* true since this functions occurs when buffer is full */
+                data->dma_rx.counter = data->dma_rx.buffer_length;
+                async_evt_rx_rdy(data);
+                if (data->rx_next_buffer != NULL) {
+                        async_evt_rx_buf_release(data);

-                /* replace the buffer when the current
-                 * is full and not the same as the next
-                 * one.
-                 */
-                uart_stm32_dma_replace_buffer(uart_dev);
+                        /* replace the buffer when the current
+                         * is full and not the same as the next
+                         * one.
+                         */
+                        uart_stm32_dma_replace_buffer(uart_dev);
+                } else {
+                        /* Buffer full without valid next buffer,
+                         * an UART_RX_DISABLED event must be generated,
+                         * but uart_stm32_async_rx_disable() cannot be
+                         * called in ISR context. So force the RX timeout
+                         * to minimum value and let the RX timeout to do the job.
+                         */
+                        k_work_reschedule(&data->dma_rx.timeout_work, K_TICKS(1));
+                }
         } else {
-                /* Buffer full without valid next buffer,
-                 * an UART_RX_DISABLED event must be generated,
-                 * but uart_stm32_async_rx_disable() cannot be
-                 * called in ISR context. So force the RX timeout
-                 * to minimum value and let the RX timeout to do the job.
-                 */
-                k_work_reschedule(&data->dma_rx.timeout_work, K_TICKS(1));
+                /* CIRCULAR MODE */
+                uart_stm32_dma_rx_flush(data->uart_dev, status);
         }
 }

@@ -1722,7 +1761,7 @@ static void uart_stm32_async_rx_timeout(struct k_work *work)
         if (data->dma_rx.counter == data->dma_rx.buffer_length) {
                 uart_stm32_async_rx_disable(dev);
         } else {
-                uart_stm32_dma_rx_flush(dev);
+                uart_stm32_dma_rx_flush(dev, STM32_ASYNC_STATUS_TIMEOUT);
         }
 }

@@ -1829,9 +1868,10 @@ static int uart_stm32_async_init(const struct device *dev)
                 data->dma_rx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
         }

-        /* RX disable circular buffer */
-        data->dma_rx.blk_cfg.source_reload_en = 0;
-        data->dma_rx.blk_cfg.dest_reload_en = 0;
+        /* Enable/disable RX circular buffer */
+        data->dma_rx.blk_cfg.source_reload_en = data->dma_rx.dma_cfg.cyclic;
+        data->dma_rx.blk_cfg.dest_reload_en = data->dma_rx.dma_cfg.cyclic;
+
         data->dma_rx.blk_cfg.fifo_mode_control = data->dma_rx.fifo_threshold;

         data->dma_rx.dma_cfg.head_block = &data->dma_rx.blk_cfg;
@@ -1868,6 +1908,10 @@ static int uart_stm32_async_init(const struct device *dev)
                 data->dma_tx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
         }

+        /* Enable/disable TX circular buffer */
+        data->dma_tx.blk_cfg.source_reload_en = data->dma_tx.dma_cfg.cyclic;
+        data->dma_tx.blk_cfg.dest_reload_en = data->dma_tx.dma_cfg.cyclic;
+
         data->dma_tx.blk_cfg.fifo_mode_control = data->dma_tx.fifo_threshold;

         data->dma_tx.dma_cfg.head_block = &data->dma_tx.blk_cfg;
@@ -2225,6 +2269,8 @@ static int uart_stm32_pm_action(const struct device *dev,
         .dma_slot = STM32_DMA_SLOT(index, dir, slot),                  \
         .channel_direction = STM32_DMA_CONFIG_DIRECTION(               \
                                 STM32_DMA_CHANNEL_CONFIG(index, dir)), \
+        .cyclic = STM32_DMA_CONFIG_CYCLIC(                             \
+                                STM32_DMA_CHANNEL_CONFIG(index, dir)), \
         .channel_priority = STM32_DMA_CONFIG_PRIORITY(                 \
                                 STM32_DMA_CHANNEL_CONFIG(index, dir)), \
         .source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE(    \
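For reference, a minimal application-side sketch of the receive path this change feeds (not part of the patch; the uart_dev handle, buffer size, timeout value and the process_bytes() helper are illustrative assumptions):

    #include <zephyr/drivers/uart.h>

    static uint8_t rx_buf[256];

    /* hypothetical consumer of received bytes, defined by the application */
    extern void process_bytes(const uint8_t *buf, size_t len);

    static void app_uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
    {
            switch (evt->type) {
            case UART_RX_RDY:
                    /* New data lives at buf[offset] .. buf[offset + len - 1]; with cyclic
                     * DMA this fires on half/full transfer and on idle-line/RX timeout.
                     */
                    process_bytes(&evt->data.rx.buf[evt->data.rx.offset], evt->data.rx.len);
                    break;
            default:
                    break;
            }
    }

    /* somewhere in application init code: */
    uart_callback_set(uart_dev, app_uart_cb, NULL);
    uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 1000 /* RX timeout in microseconds */);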