@@ -59,7 +59,7 @@ struct dma_esp32_channel {
 	int periph_id;
 	dma_callback_t cb;
 	void *user_data;
-	dma_descriptor_t desc;
+	dma_descriptor_t desc_list[CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM];
 };
 
 struct dma_esp32_config {
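
Note on sizing: each dma_descriptor_t covers at most DMA_DESCRIPTOR_BUFFER_MAX_SIZE bytes of buffer, so this array bounds the largest transfer a channel can describe. A minimal sketch of the arithmetic, assuming the HAL's 4095-byte per-descriptor limit; DMA_ESP32_MAX_XFER is a hypothetical helper, not part of the driver:

	/* Sketch: upper bound on one transfer, assuming
	 * DMA_DESCRIPTOR_BUFFER_MAX_SIZE == 4095 as in the ESP HAL headers.
	 */
	#define DMA_ESP32_MAX_XFER \
		(CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM * DMA_DESCRIPTOR_BUFFER_MAX_SIZE)
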
@@ -79,15 +79,20 @@ static void IRAM_ATTR dma_esp32_isr_handle_rx(const struct device *dev,
 					     struct dma_esp32_channel *rx, uint32_t intr_status)
 {
 	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
+	uint32_t status;
 
 	gdma_ll_rx_clear_interrupt_status(data->hal.dev, rx->channel_id, intr_status);
 
-	if (intr_status & (GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE)) {
-		intr_status &= ~(GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE);
+	if (intr_status == (GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE)) {
+		status = DMA_STATUS_COMPLETE;
+	} else if (intr_status == GDMA_LL_EVENT_RX_DONE) {
+		status = DMA_STATUS_BLOCK;
+	} else {
+		status = -intr_status;
 	}
 
 	if (rx->cb) {
-		rx->cb(dev, rx->user_data, rx->channel_id * 2, -intr_status);
+		rx->cb(dev, rx->user_data, rx->channel_id * 2, status);
 	}
 }
 
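
With this change, the RX callback argument distinguishes a finished chain (DMA_STATUS_COMPLETE), a finished intermediate block (DMA_STATUS_BLOCK), and errors (a negative value carrying the raw interrupt bits). A minimal consumer sketch, assuming Zephyr's standard dma_callback_t signature and a hypothetical application semaphore passed as user_data:

	/* Sketch: rx_done_sem is a hypothetical k_sem owned by the application. */
	static void rx_dma_cb(const struct device *dev, void *user_data,
			      uint32_t channel, int status)
	{
		if (status == DMA_STATUS_COMPLETE) {
			k_sem_give((struct k_sem *)user_data); /* whole descriptor chain done */
		} else if (status == DMA_STATUS_BLOCK) {
			/* one block finished; the channel is still running */
		} else {
			/* status < 0: negated GDMA_LL_EVENT_RX_* error bits */
		}
	}

The channel argument is the API-level channel number (channel_id * 2 for RX, channel_id * 2 + 1 for TX).
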
@@ -101,7 +106,7 @@ static void IRAM_ATTR dma_esp32_isr_handle_tx(const struct device *dev,
 	intr_status &= ~(GDMA_LL_EVENT_TX_TOTAL_EOF | GDMA_LL_EVENT_TX_DONE | GDMA_LL_EVENT_TX_EOF);
 
 	if (tx->cb) {
-		tx->cb(dev, tx->user_data, tx->channel_id * 2 + 1, -intr_status);
+		tx->cb(dev, tx->user_data, tx->channel_id * 2 + 1, -intr_status);
 	}
 }
 
@@ -127,17 +132,43 @@ static void IRAM_ATTR dma_esp32_isr_handle(const struct device *dev, uint8_t rx_
 #endif
 
 static int dma_esp32_config_rx_descriptor(struct dma_esp32_channel *dma_channel,
-					  struct dma_block_config *block)
+					  struct dma_block_config *block)
 {
+	if (!block) {
+		LOG_ERR("At least one dma block is required");
+		return -EINVAL;
+	}
+
 	if (!esp_ptr_dma_capable((uint32_t *)block->dest_address)) {
 		LOG_ERR("Rx buffer not in DMA capable memory: %p", (uint32_t *)block->dest_address);
 		return -EINVAL;
 	}
 
-	memset(&dma_channel->desc, 0, sizeof(dma_channel->desc));
-	dma_channel->desc.buffer = (void *)block->dest_address;
-	dma_channel->desc.dw0.size = block->block_size;
-	dma_channel->desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+	dma_descriptor_t *desc_iter = dma_channel->desc_list;
+
+	for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) {
+		if (block->block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
+			LOG_ERR("Size of block %d is too large", i);
+			return -EINVAL;
+		}
+		memset(desc_iter, 0, sizeof(dma_descriptor_t));
+		desc_iter->buffer = (void *)block->dest_address;
+		desc_iter->dw0.size = block->block_size;
+		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+		if (!block->next_block) {
+			desc_iter->next = NULL;
+			break;
+		}
+		desc_iter->next = desc_iter + 1;
+		desc_iter += 1;
+		block = block->next_block;
+	}
+
+	if (desc_iter->next) {
+		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
+		LOG_ERR("Too many dma blocks. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
+		return -EINVAL;
+	}
 
 	return 0;
 }
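
The loop mirrors the caller's dma_block_config chain into the flat desc_list array, linking entries through next and leaving next == NULL on the last one; the trailing check catches chains longer than the array. A sketch of a two-block RX scatter list this code would accept (buffer names and sizes are illustrative; both buffers must live in DMA-capable memory):

	/* Sketch: chain two RX blocks; buf_a and buf_b are hypothetical buffers. */
	struct dma_block_config blk_b = {
		.dest_address = (uint32_t)buf_b,
		.block_size = 128,
	};
	struct dma_block_config blk_a = {
		.dest_address = (uint32_t)buf_a,
		.block_size = 128,
		.next_block = &blk_b,
	};
	/* blk_a maps to desc_list[0], blk_b to desc_list[1]; desc_list[1].next == NULL */
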
@@ -181,20 +212,46 @@ static int dma_esp32_config_rx(const struct device *dev, struct dma_esp32_channe
 }
 
 static int dma_esp32_config_tx_descriptor(struct dma_esp32_channel *dma_channel,
-					  struct dma_block_config *block)
+					  struct dma_block_config *block)
 {
+	if (!block) {
+		LOG_ERR("At least one dma block is required");
+		return -EINVAL;
+	}
+
 	if (!esp_ptr_dma_capable((uint32_t *)block->source_address)) {
 		LOG_ERR("Tx buffer not in DMA capable memory: %p",
 			(uint32_t *)block->source_address);
 		return -EINVAL;
 	}
 
-	memset(&dma_channel->desc, 0, sizeof(dma_channel->desc));
-	dma_channel->desc.buffer = (void *)block->source_address;
-	dma_channel->desc.dw0.size = block->block_size;
-	dma_channel->desc.dw0.length = block->block_size;
-	dma_channel->desc.dw0.suc_eof = 1;
-	dma_channel->desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+	dma_descriptor_t *desc_iter = dma_channel->desc_list;
+
+	for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) {
+		if (block->block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
+			LOG_ERR("Size of block %d is too large", i);
+			return -EINVAL;
+		}
+		memset(desc_iter, 0, sizeof(dma_descriptor_t));
+		desc_iter->buffer = (void *)block->source_address;
+		desc_iter->dw0.size = block->block_size;
+		desc_iter->dw0.length = block->block_size;
+		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+		if (!block->next_block) {
+			desc_iter->next = NULL;
+			desc_iter->dw0.suc_eof = 1;
+			break;
+		}
+		desc_iter->next = desc_iter + 1;
+		desc_iter += 1;
+		block = block->next_block;
+	}
+
+	if (desc_iter->next) {
+		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
+		LOG_ERR("Too many dma blocks. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
+		return -EINVAL;
+	}
 
 	return 0;
 }
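
The TX variant differs in two ways: dw0.length is pre-filled with the byte count to send, and suc_eof is set only on the last descriptor so the peripheral treats the whole chain as one frame. A hedged sketch of feeding such a chain through the generic Zephyr API (dma_dev, TX_CHANNEL, TX_DMA_SLOT and tx_dma_cb are placeholders):

	/* Sketch: submit the chain from the RX example above via dma_config(). */
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_PERIPHERAL,
		.dma_slot = TX_DMA_SLOT,   /* placeholder: peripheral trigger ID */
		.block_count = 2,
		.head_block = &blk_a,      /* first block of the chain */
		.dma_callback = tx_dma_cb, /* optional completion callback */
	};
	int ret = dma_config(dma_dev, TX_CHANNEL, &cfg);
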
@@ -237,7 +294,7 @@ static int dma_esp32_config_tx(const struct device *dev, struct dma_esp32_channe
 }
 
 static int dma_esp32_config(const struct device *dev, uint32_t channel,
-			    struct dma_config *config_dma)
+			    struct dma_config *config_dma)
 {
 	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
 	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
@@ -320,24 +377,24 @@ static int dma_esp32_start(const struct device *dev, uint32_t channel)
 						    GDMA_LL_EVENT_TX_EOF, true);
 
 		gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
-					 (int32_t)&dma_channel_rx->desc);
+					 (int32_t)dma_channel_rx->desc_list);
 		gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
 
 		gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
-					 (int32_t)&dma_channel_tx->desc);
+					 (int32_t)dma_channel_tx->desc_list);
 		gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
 	} else {
 		if (dma_channel->dir == DMA_RX) {
 			gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
 						    UINT32_MAX, true);
 			gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
-						 (int32_t)&dma_channel->desc);
+						 (int32_t)dma_channel->desc_list);
 			gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
 		} else if (dma_channel->dir == DMA_TX) {
 			gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
 						    GDMA_LL_EVENT_TX_EOF, true);
 			gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
-						 (int32_t)&dma_channel->desc);
+						 (int32_t)dma_channel->desc_list);
 			gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
 		} else {
 			LOG_ERR("Channel %d is not configured", channel);
@@ -382,11 +439,12 @@ static int dma_esp32_stop(const struct device *dev, uint32_t channel)
 }
 
 static int dma_esp32_get_status(const struct device *dev, uint32_t channel,
-				struct dma_status *status)
+				struct dma_status *status)
 {
 	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
 	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
 	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
+	dma_descriptor_t *desc;
 
 	if (channel >= config->dma_channel_max) {
 		LOG_ERR("Unsupported channel");
@@ -397,16 +455,27 @@ static int dma_esp32_get_status(const struct device *dev, uint32_t channel,
 		return -EINVAL;
 	}
 
+	memset(status, 0, sizeof(struct dma_status));
+
 	if (dma_channel->dir == DMA_RX) {
 		status->busy = !gdma_ll_rx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
 		status->dir = PERIPHERAL_TO_MEMORY;
-		status->read_position = dma_channel->desc.dw0.length;
+		desc = (dma_descriptor_t *)gdma_ll_rx_get_current_desc_addr(
+			data->hal.dev, dma_channel->channel_id);
+		if (desc >= dma_channel->desc_list) {
+			status->read_position = desc - dma_channel->desc_list;
+			status->total_copied = desc->dw0.length
+					       + dma_channel->desc_list[0].dw0.size
+						       * status->read_position;
+		}
 	} else if (dma_channel->dir == DMA_TX) {
 		status->busy = !gdma_ll_tx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
 		status->dir = MEMORY_TO_PERIPHERAL;
-		status->write_position = dma_channel->desc.dw0.length;
-		status->total_copied = dma_channel->desc.dw0.length;
-		status->pending_length = dma_channel->desc.dw0.size - dma_channel->desc.dw0.length;
+		desc = (dma_descriptor_t *)gdma_ll_tx_get_current_desc_addr(
+			data->hal.dev, dma_channel->channel_id);
+		if (desc >= dma_channel->desc_list) {
+			status->write_position = desc - dma_channel->desc_list;
+		}
 	}
 
 	return 0;
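
With a descriptor list, read_position/write_position now report the index of the descriptor the hardware is currently on rather than a byte offset, and total_copied on RX is an estimate that assumes every block has the size of desc_list[0]. A polling sketch through the generic API (dma_dev and chan are placeholders):

	/* Sketch: poll RX progress via dma_get_status(). */
	struct dma_status st;

	if (dma_get_status(dma_dev, chan, &st) == 0 && st.dir == PERIPHERAL_TO_MEMORY) {
		/* st.read_position: index of the in-flight descriptor */
		/* st.total_copied: received bytes, assuming equally sized blocks */
		printk("desc=%u busy=%d copied=%u\n", (unsigned)st.read_position,
		       st.busy, (unsigned)st.total_copied);
	}
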
@@ -418,8 +487,8 @@ static int dma_esp32_reload(const struct device *dev, uint32_t channel, uint32_t
 	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
 	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
 	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
-	struct dma_block_config block = {0};
-	int err = 0;
+	dma_descriptor_t *desc_iter = dma_channel->desc_list;
+	uint32_t buf;
 
 	if (channel >= config->dma_channel_max) {
 		LOG_ERR("Unsupported channel");
@@ -428,22 +497,40 @@ static int dma_esp32_reload(const struct device *dev, uint32_t channel, uint32_t
 
 	if (dma_channel->dir == DMA_RX) {
 		gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id);
-		block.block_size = size;
-		block.dest_address = dst;
-		err = dma_esp32_config_rx_descriptor(dma_channel, &block);
-		if (err) {
-			LOG_ERR("Error reloading RX channel (%d)", err);
-			return err;
-		}
+		buf = dst;
 	} else if (dma_channel->dir == DMA_TX) {
 		gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id);
-		block.block_size = size;
-		block.source_address = src;
-		err = dma_esp32_config_tx_descriptor(dma_channel, &block);
-		if (err) {
-			LOG_ERR("Error reloading TX channel (%d)", err);
-			return err;
+		buf = src;
+	} else {
+		return -EINVAL;
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(dma_channel->desc_list); ++i) {
+		memset(desc_iter, 0, sizeof(dma_descriptor_t));
+		desc_iter->buffer = (void *)(buf + DMA_DESCRIPTOR_BUFFER_MAX_SIZE * i);
+		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+		if (size < DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
+			desc_iter->dw0.size = size;
+			if (dma_channel->dir == DMA_TX) {
+				desc_iter->dw0.length = size;
+				desc_iter->dw0.suc_eof = 1;
+			}
+			desc_iter->next = NULL;
+			break;
+		}
+		desc_iter->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
+		if (dma_channel->dir == DMA_TX) {
+			desc_iter->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
 		}
+		size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
+		desc_iter->next = desc_iter + 1;
+		desc_iter += 1;
+	}
+
+	if (desc_iter->next) {
+		memset(desc_iter, 0, sizeof(dma_descriptor_t));
+		LOG_ERR("Not enough DMA descriptors. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
+		return -EINVAL;
 	}
 
 	return 0;
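
dma_esp32_reload() now rebuilds the descriptor chain in place, slicing one contiguous buffer into DMA_DESCRIPTOR_BUFFER_MAX_SIZE-sized chunks, rather than routing through the block-config helpers. A usage sketch, assuming the channel was configured earlier and the size fits within CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM descriptors (dma_dev, rx_chan and rx_buf are placeholders):

	/* Sketch: re-arm an RX channel with a fresh buffer, then restart it. */
	int ret = dma_reload(dma_dev, rx_chan, 0 /* src unused for RX */,
			     (uint32_t)rx_buf, sizeof(rx_buf));
	if (ret == 0) {
		ret = dma_start(dma_dev, rx_chan);
	}
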
@@ -497,7 +584,7 @@ static int dma_esp32_init(const struct device *dev)
 		dma_channel->cb = NULL;
 		dma_channel->dir = DMA_UNCONFIGURED;
 		dma_channel->periph_id = ESP_GDMA_TRIG_PERIPH_INVALID;
-		memset(&dma_channel->desc, 0, sizeof(dma_descriptor_t));
+		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
 	}
 
 	gdma_hal_init(&data->hal, 0);
@@ -593,7 +680,7 @@ static void *irq_handlers[] = {
 			.dev = (gdma_dev_t *)DT_INST_REG_ADDR(idx), \
 		}, \
 	}; \
-	\
+	\
 	DEVICE_DT_INST_DEFINE(idx, &dma_esp32_init, NULL, &dma_data_##idx, &dma_config_##idx, \
 			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &dma_esp32_api);