@@ -95,6 +95,17 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL);
 #define UARTE_HAS_FRAME_TIMEOUT 1
 #endif
 
+/* The frame timeout feature has a bug: the countdown counter may not be started
+ * under a specific condition. It can happen when RX is started manually after ENDRX
+ * (the STOPRX task was not triggered) while a byte is still being received. The
+ * RXDRDY event generated by that byte may then fail to start the frame timeout
+ * counter. If that byte is the last one in a transfer, the expected RX timeout
+ * never occurs unless this workaround is applied.
+ */
+#ifdef UARTE_HAS_FRAME_TIMEOUT
+#define RX_FRAMETIMEOUT_WORKAROUND 1
+#endif
+
 #define INSTANCE_NEEDS_CACHE_MGMT(unused, prefix, i, prop) UARTE_IS_CACHEABLE(prefix##i)
 
 #if UARTE_FOR_EACH_INSTANCE(INSTANCE_NEEDS_CACHE_MGMT, (+), (0), _)
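Editor's note: RX_FRAMETIMEOUT_WORKAROUND is defined to 1 rather than merely defined, so the driver can test it with Zephyr's IS_ENABLED() inside ordinary if statements (as the later hunks do) instead of wrapping every use in #ifdef. A minimal sketch of that pattern, using a made-up FEATURE_X macro that is not part of this driver:

#include <zephyr/sys/util_macro.h>

#define FEATURE_X 1	/* hypothetical option; mirrors how RX_FRAMETIMEOUT_WORKAROUND is defined */

void maybe_run_feature(void)
{
	if (IS_ENABLED(FEATURE_X)) {
		/* Kept by the compiler only when FEATURE_X is defined to 1;
		 * the branch is still parsed and type-checked when it is not,
		 * then discarded as dead code.
		 */
	}
}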
@@ -251,6 +262,9 @@ struct uarte_nrfx_data {
 #define UARTE_FLAG_LOW_POWER (UARTE_FLAG_LOW_POWER_TX | UARTE_FLAG_LOW_POWER_RX)
 #define UARTE_FLAG_TRIG_RXTO BIT(2)
 #define UARTE_FLAG_POLL_OUT BIT(3)
+/* Flag indicating that the workaround for the broken frame timeout is active. */
+#define UARTE_FLAG_FTIMEOUT_WATCH_BIT 4
+#define UARTE_FLAG_FTIMEOUT_WATCH BIT(UARTE_FLAG_FTIMEOUT_WATCH_BIT)
 
 /* If enabled then ENDTX is PPI'ed to TXSTOP */
 #define UARTE_CFG_FLAG_PPI_ENDTX BIT(0)
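Editor's note: the flag is exposed both as a bit index (..._WATCH_BIT) and as a mask (..._WATCH) because Zephyr's atomic bit helpers take a bit number, while plain mask arithmetic needs the BIT() value. A short sketch of the two forms, with an illustrative flags word and flag name that are not part of the driver:

#include <zephyr/sys/atomic.h>
#include <zephyr/sys/util.h>

#define MY_FLAG_BIT 4
#define MY_FLAG     BIT(MY_FLAG_BIT)

static atomic_t my_flags;

void flag_demo(void)
{
	atomic_set_bit(&my_flags, MY_FLAG_BIT);		/* atomic helpers use the bit index */

	if (atomic_test_and_clear_bit(&my_flags, MY_FLAG_BIT)) {
		/* flag was set and has now been cleared atomically */
	}

	if (atomic_get(&my_flags) & MY_FLAG) {		/* mask form for plain tests */
		/* flag is still set */
	}
}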
@@ -1304,9 +1318,22 @@ static void rx_timeout(struct k_timer *timer)
 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
 
 #ifdef UARTE_HAS_FRAME_TIMEOUT
-	if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
-		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
+	struct uarte_nrfx_data *data = dev->data;
+	struct uarte_async_rx *async_rx = &data->async->rx;
+	bool rxdrdy = nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY);
+
+	if (IS_ENABLED(RX_FRAMETIMEOUT_WORKAROUND) &&
+	    atomic_test_and_clear_bit(&data->flags, UARTE_FLAG_FTIMEOUT_WATCH_BIT)) {
+		if (rxdrdy) {
+			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
+			k_timer_start(&async_rx->timer, async_rx->timeout, K_NO_WAIT);
+		}
+	} else {
+		if (!rxdrdy) {
+			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
+		}
 	}
+
 	return;
 #else /* UARTE_HAS_FRAME_TIMEOUT */
 	struct uarte_nrfx_data *data = dev->data;
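Editor's note on what the rx_timeout() change above preserves: with the async UART API, uart_rx_enable() takes an inactivity timeout and UART_RX_RDY is expected once that timeout elapses after the last received byte; the hardware frame timeout (or, when the workaround is watching, the re-armed software timer) is what delivers it. A hedged application-side sketch, with illustrative names (rx_buf, my_uart_cb, start_rx) that are not part of this driver:

#include <zephyr/drivers/uart.h>

static uint8_t rx_buf[64];

static void my_uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	if (evt->type == UART_RX_RDY) {
		/* evt->data.rx describes the received chunk; it is reported when
		 * the buffer fills up or when the RX timeout expires after the
		 * last byte.
		 */
	}
}

int start_rx(const struct device *uart)
{
	int err = uart_callback_set(uart, my_uart_cb, NULL);

	if (err) {
		return err;
	}

	/* 10000 us (10 ms) of inactivity after the last byte reports UART_RX_RDY. */
	return uart_rx_enable(uart, rx_buf, sizeof(rx_buf), 10000);
}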
@@ -1532,6 +1559,7 @@ static void endrx_isr(const struct device *dev)
 	async_rx->offset = 0;
 
 	if (async_rx->enabled) {
+		bool start_timeout = false;
 		/* If there is a next buffer, then STARTRX will have already been
 		 * invoked by the short (the next buffer will be filling up already)
 		 * and here we just do the swap of which buffer the driver is following,
@@ -1546,6 +1574,11 @@ static void endrx_isr(const struct device *dev)
 		 */
 		if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
 			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
+			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
+			if (IS_ENABLED(RX_FRAMETIMEOUT_WORKAROUND)) {
+				data->flags |= BIT(UARTE_FLAG_FTIMEOUT_WATCH_BIT);
+				start_timeout = true;
+			}
 		}
 		/* Remove the short until the subsequent next buffer is setup */
 		nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
@@ -1554,6 +1587,11 @@ static void endrx_isr(const struct device *dev)
 	}
 
 	irq_unlock(key);
+	if (IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) {
+		if (start_timeout && !K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) {
+			k_timer_start(&async_rx->timer, async_rx->timeout, K_NO_WAIT);
+		}
+	}
 }
 
 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
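Editor's note: the k_timer_start() call added above arms async_rx->timer as a one-shot timer. In Zephyr, a period of K_NO_WAIT (or K_FOREVER) means the timer fires only once, and K_TIMEOUT_EQ() is how a k_timeout_t is compared against K_NO_WAIT. A small sketch of that pattern with illustrative names (my_timer, my_expiry, arm_once) rather than the driver's own objects:

#include <zephyr/kernel.h>

static void my_expiry(struct k_timer *timer)
{
	/* Runs in interrupt context when the duration elapses. */
}

K_TIMER_DEFINE(my_timer, my_expiry, NULL);

void arm_once(k_timeout_t duration)
{
	/* Skip arming when the requested duration means "no wait" at all. */
	if (!K_TIMEOUT_EQ(duration, K_NO_WAIT)) {
		/* K_NO_WAIT as the period makes this a one-shot timer. */
		k_timer_start(&my_timer, duration, K_NO_WAIT);
	}
}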
@@ -1611,6 +1649,12 @@ static void rxto_isr(const struct device *dev)
 	struct uarte_nrfx_data *data = dev->data;
 	struct uarte_async_rx *async_rx = &data->async->rx;
 
+	if (IS_ENABLED(RX_FRAMETIMEOUT_WORKAROUND)) {
+		if (atomic_test_and_clear_bit(&data->flags, UARTE_FLAG_FTIMEOUT_WATCH_BIT)) {
+			k_timer_stop(&async_rx->timer);
+		}
+	}
+
 	if (async_rx->buf) {
 #ifdef CONFIG_HAS_NORDIC_DMM
 		(void)dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, 0, async_rx->buf);