
Commit 237c49a

epc-ake authored and nashif committed
drivers: dma: esp32: added support for multiple descriptors
Previously, configuring the GDMA was limited to a single descriptor, restricting memory transfers to a maximum of 4 kB. This update introduces support for multiple descriptors, enabling users to define multiple dma_blocks. The maximum number of descriptors can be configured via the CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM option.

Additionally, the dma_get_status() function now reports the index of the currently processed descriptor through the status.read_position and status.write_position fields.

Signed-off-by: Armin Kessler <[email protected]>
1 parent 009d02c commit 237c49a
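
For context, a minimal sketch of what the new capability looks like from the generic Zephyr DMA API: two chained dma_blocks now become two chained GDMA descriptors. All names (dma_dev, channel, periph_id, the buffers) are illustrative and not part of this commit, and the buffers must live in DMA-capable memory:

#include <zephyr/drivers/dma.h>

static uint8_t buf0[4095];	/* a single block may not exceed 4095 bytes */
static uint8_t buf1[4095];

int start_rx(const struct device *dma_dev, uint32_t channel, uint32_t periph_id)
{
	struct dma_block_config block1 = {
		.dest_address = (uint32_t)buf1,
		.block_size = sizeof(buf1),
	};
	struct dma_block_config block0 = {
		.dest_address = (uint32_t)buf0,
		.block_size = sizeof(buf0),
		.next_block = &block1,	/* chained block -> second descriptor */
	};
	struct dma_config cfg = {
		.channel_direction = PERIPHERAL_TO_MEMORY,
		.dma_slot = periph_id,	/* GDMA trigger peripheral */
		.block_count = 2,
		.head_block = &block0,
	};
	int ret = dma_config(dma_dev, channel, &cfg);

	return (ret == 0) ? dma_start(dma_dev, channel) : ret;
}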

File tree: 2 files changed (+137 −44)
  drivers/dma/Kconfig.esp32
  drivers/dma/dma_esp32_gdma.c


drivers/dma/Kconfig.esp32 (+6)

@@ -7,3 +7,9 @@ config DMA_ESP32
 	default y
 	help
 	  General Purpose DMA for ESP32 series.
+
+config DMA_ESP32_MAX_DESCRIPTOR_NUM
+	int "Maximal number of available DMA descriptors"
+	default 16
+	help
+	  Reserves memory for a maximal number of descriptors
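
The new option can be raised from an application's prj.conf when more blocks are chained than the default pool of 16 allows; the value 32 below is only an example:

CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM=32

Each descriptor is reserved statically per channel (see the desc_list array in the driver below), so the default is kept deliberately small.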

drivers/dma/dma_esp32_gdma.c (+131 −44)
@@ -59,7 +59,7 @@ struct dma_esp32_channel {
 	int periph_id;
 	dma_callback_t cb;
 	void *user_data;
-	dma_descriptor_t desc;
+	dma_descriptor_t desc_list[CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM];
 };
 
 struct dma_esp32_config {
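
Each array element is an ESP HAL dma_descriptor_t, roughly laid out as below (paraphrased from the HAL's dma_types.h; consult that header for the authoritative definition). The 12-bit size field is what caps a single descriptor, and hence a single dma_block, at DMA_DESCRIPTOR_BUFFER_MAX_SIZE (4095) bytes:

typedef struct dma_descriptor_s dma_descriptor_t;
struct dma_descriptor_s {
	struct {
		uint32_t size : 12;	/* buffer size in bytes */
		uint32_t length : 12;	/* number of valid bytes */
		uint32_t reserved : 4;
		uint32_t err_eof : 1;	/* receive error flag */
		uint32_t reserved2 : 1;
		uint32_t suc_eof : 1;	/* last descriptor of the chain */
		uint32_t owner : 1;	/* CPU or DMA owns the buffer */
	} dw0;
	void *buffer;			/* data buffer */
	dma_descriptor_t *next;		/* next descriptor, NULL terminates */
};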
@@ -79,15 +79,20 @@ static void IRAM_ATTR dma_esp32_isr_handle_rx(const struct device *dev,
 					      struct dma_esp32_channel *rx, uint32_t intr_status)
 {
 	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
+	uint32_t status;
 
 	gdma_ll_rx_clear_interrupt_status(data->hal.dev, rx->channel_id, intr_status);
 
-	if (intr_status & (GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE)) {
-		intr_status &= ~(GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE);
+	if (intr_status == (GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE)) {
+		status = DMA_STATUS_COMPLETE;
+	} else if (intr_status == GDMA_LL_EVENT_RX_DONE) {
+		status = DMA_STATUS_BLOCK;
+	} else {
+		status = -intr_status;
 	}
 
 	if (rx->cb) {
-		rx->cb(dev, rx->user_data, rx->channel_id*2, -intr_status);
+		rx->cb(dev, rx->user_data, rx->channel_id * 2, status);
 	}
 }
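With this change an RX callback can tell intermediate blocks from end of transfer. A minimal sketch of such a callback, matching Zephyr's dma_callback_t signature (the handler name and comments are illustrative):

static void rx_dma_cb(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	if (status == DMA_STATUS_COMPLETE) {
		/* whole descriptor chain finished (SUC_EOF) */
	} else if (status == DMA_STATUS_BLOCK) {
		/* one intermediate descriptor/block finished */
	} else if (status < 0) {
		/* negated error interrupt bits, as before */
	}
}

The TX path (next hunk) keeps reporting the negated interrupt status unchanged.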

@@ -101,7 +106,7 @@ static void IRAM_ATTR dma_esp32_isr_handle_tx(const struct device *dev,
 	intr_status &= ~(GDMA_LL_EVENT_TX_TOTAL_EOF | GDMA_LL_EVENT_TX_DONE | GDMA_LL_EVENT_TX_EOF);
 
 	if (tx->cb) {
-		tx->cb(dev, tx->user_data, tx->channel_id*2 + 1, -intr_status);
+		tx->cb(dev, tx->user_data, tx->channel_id * 2 + 1, -intr_status);
 	}
 }
 
@@ -127,17 +132,43 @@ static void IRAM_ATTR dma_esp32_isr_handle(const struct device *dev, uint8_t rx_
 #endif
 
 static int dma_esp32_config_rx_descriptor(struct dma_esp32_channel *dma_channel,
-						struct dma_block_config *block)
+					  struct dma_block_config *block)
 {
+	if (!block) {
+		LOG_ERR("At least one dma block is required");
+		return -EINVAL;
+	}
+
 	if (!esp_ptr_dma_capable((uint32_t *)block->dest_address)) {
 		LOG_ERR("Rx buffer not in DMA capable memory: %p", (uint32_t *)block->dest_address);
 		return -EINVAL;
 	}
 
-	memset(&dma_channel->desc, 0, sizeof(dma_channel->desc));
-	dma_channel->desc.buffer = (void *)block->dest_address;
-	dma_channel->desc.dw0.size = block->block_size;
-	dma_channel->desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+	dma_descriptor_t *desc_iter = dma_channel->desc_list;
+
+	for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) {
+		if (block->block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
+			LOG_ERR("Size of block %d is too large", i);
+			return -EINVAL;
+		}
+		memset(desc_iter, 0, sizeof(dma_descriptor_t));
+		desc_iter->buffer = (void *)block->dest_address;
+		desc_iter->dw0.size = block->block_size;
+		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+		if (!block->next_block) {
+			desc_iter->next = NULL;
+			break;
+		}
+		desc_iter->next = desc_iter + 1;
+		desc_iter += 1;
+		block = block->next_block;
+	}
+
+	if (desc_iter->next) {
+		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
+		LOG_ERR("Too many dma blocks. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
+		return -EINVAL;
+	}
 
 	return 0;
 }
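
Because each descriptor still carries at most 4095 bytes, a caller must split larger transfers into chained dma_blocks itself. A hypothetical helper doing that split for an RX buffer (illustrative, not part of the driver; the local GDMA_MAX_BLOCK mirrors the HAL's DMA_DESCRIPTOR_BUFFER_MAX_SIZE):

#include <errno.h>
#include <string.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/sys/util.h>

#define GDMA_MAX_BLOCK 4095	/* DMA_DESCRIPTOR_BUFFER_MAX_SIZE in the ESP HAL */

static int fill_rx_blocks(struct dma_block_config *blocks, int max_blocks,
			  uint32_t addr, size_t len)
{
	int i;

	for (i = 0; i < max_blocks && len > 0; i++) {
		size_t chunk = MIN(len, (size_t)GDMA_MAX_BLOCK);

		memset(&blocks[i], 0, sizeof(blocks[i]));
		blocks[i].dest_address = addr;	/* must be DMA-capable memory */
		blocks[i].block_size = chunk;
		if (i > 0) {
			blocks[i - 1].next_block = &blocks[i];
		}
		addr += chunk;
		len -= chunk;
	}

	/* number of blocks written, or failure if the buffer did not fit */
	return (len == 0) ? i : -ENOMEM;
}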
@@ -181,20 +212,46 @@ static int dma_esp32_config_rx(const struct device *dev, struct dma_esp32_channe
 }
 
 static int dma_esp32_config_tx_descriptor(struct dma_esp32_channel *dma_channel,
-						struct dma_block_config *block)
+					  struct dma_block_config *block)
 {
+	if (!block) {
+		LOG_ERR("At least one dma block is required");
+		return -EINVAL;
+	}
+
 	if (!esp_ptr_dma_capable((uint32_t *)block->source_address)) {
 		LOG_ERR("Tx buffer not in DMA capable memory: %p",
 			(uint32_t *)block->source_address);
 		return -EINVAL;
 	}
 
-	memset(&dma_channel->desc, 0, sizeof(dma_channel->desc));
-	dma_channel->desc.buffer = (void *)block->source_address;
-	dma_channel->desc.dw0.size = block->block_size;
-	dma_channel->desc.dw0.length = block->block_size;
-	dma_channel->desc.dw0.suc_eof = 1;
-	dma_channel->desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+	dma_descriptor_t *desc_iter = dma_channel->desc_list;
+
+	for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) {
+		if (block->block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
+			LOG_ERR("Size of block %d is too large", i);
+			return -EINVAL;
+		}
+		memset(desc_iter, 0, sizeof(dma_descriptor_t));
+		desc_iter->buffer = (void *)block->source_address;
+		desc_iter->dw0.size = block->block_size;
+		desc_iter->dw0.length = block->block_size;
+		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+		if (!block->next_block) {
+			desc_iter->next = NULL;
+			desc_iter->dw0.suc_eof = 1;
+			break;
+		}
+		desc_iter->next = desc_iter + 1;
+		desc_iter += 1;
+		block = block->next_block;
+	}
+
+	if (desc_iter->next) {
+		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
+		LOG_ERR("Too many dma blocks. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
+		return -EINVAL;
+	}
 
 	return 0;
 }
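
Two TX-specific details are visible in the loop above: dw0.length is pre-filled with the number of bytes to transmit (on the RX side the hardware writes it back), and suc_eof is set only on the final descriptor, so the peripheral sees a single EOF for the whole chain.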
@@ -237,7 +294,7 @@ static int dma_esp32_config_tx(const struct device *dev, struct dma_esp32_channe
 }
 
 static int dma_esp32_config(const struct device *dev, uint32_t channel,
-				struct dma_config *config_dma)
+			    struct dma_config *config_dma)
 {
 	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
 	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
@@ -320,24 +377,24 @@ static int dma_esp32_start(const struct device *dev, uint32_t channel)
 					    GDMA_LL_EVENT_TX_EOF, true);
 
 		gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
-					 (int32_t)&dma_channel_rx->desc);
+					 (int32_t)dma_channel_rx->desc_list);
 		gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
 
 		gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
-					 (int32_t)&dma_channel_tx->desc);
+					 (int32_t)dma_channel_tx->desc_list);
 		gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
 	} else {
 		if (dma_channel->dir == DMA_RX) {
 			gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
 						    UINT32_MAX, true);
 			gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
-						 (int32_t)&dma_channel->desc);
+						 (int32_t)dma_channel->desc_list);
 			gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
 		} else if (dma_channel->dir == DMA_TX) {
 			gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
 						    GDMA_LL_EVENT_TX_EOF, true);
 			gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
-						 (int32_t)&dma_channel->desc);
+						 (int32_t)dma_channel->desc_list);
 			gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
 		} else {
 			LOG_ERR("Channel %d is not configured", channel);
@@ -382,11 +439,12 @@ static int dma_esp32_stop(const struct device *dev, uint32_t channel)
 }
 
 static int dma_esp32_get_status(const struct device *dev, uint32_t channel,
-					struct dma_status *status)
+				struct dma_status *status)
 {
 	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
 	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
 	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
+	dma_descriptor_t *desc;
 
 	if (channel >= config->dma_channel_max) {
 		LOG_ERR("Unsupported channel");
@@ -397,16 +455,27 @@ static int dma_esp32_get_status(const struct device *dev, uint32_t channel,
 		return -EINVAL;
 	}
 
+	memset(status, 0, sizeof(struct dma_status));
+
 	if (dma_channel->dir == DMA_RX) {
 		status->busy = !gdma_ll_rx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
 		status->dir = PERIPHERAL_TO_MEMORY;
-		status->read_position = dma_channel->desc.dw0.length;
+		desc = (dma_descriptor_t *)gdma_ll_rx_get_current_desc_addr(
+			data->hal.dev, dma_channel->channel_id);
+		if (desc >= dma_channel->desc_list) {
+			status->read_position = desc - dma_channel->desc_list;
+			status->total_copied = desc->dw0.length
+					       + dma_channel->desc_list[0].dw0.size
+						 * status->read_position;
+		}
 	} else if (dma_channel->dir == DMA_TX) {
 		status->busy = !gdma_ll_tx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
 		status->dir = MEMORY_TO_PERIPHERAL;
-		status->write_position = dma_channel->desc.dw0.length;
-		status->total_copied = dma_channel->desc.dw0.length;
-		status->pending_length = dma_channel->desc.dw0.size - dma_channel->desc.dw0.length;
+		desc = (dma_descriptor_t *)gdma_ll_tx_get_current_desc_addr(
+			data->hal.dev, dma_channel->channel_id);
+		if (desc >= dma_channel->desc_list) {
+			status->write_position = desc - dma_channel->desc_list;
+		}
 	}
 
 	return 0;
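
Note the semantic change: read_position and write_position are now descriptor indices rather than byte counts, and the RX total_copied estimate scales the first descriptor's size by that index, so it is exact only when all blocks share one size. A sketch of polling this from application code (function and variable names illustrative):

#include <zephyr/drivers/dma.h>
#include <zephyr/sys/printk.h>

void dma_report(const struct device *dma_dev, uint32_t channel)
{
	struct dma_status st;

	if (dma_get_status(dma_dev, channel, &st) != 0) {
		return;
	}

	/* descriptor index of the block currently being processed */
	uint32_t idx = (st.dir == PERIPHERAL_TO_MEMORY) ? st.read_position
							: st.write_position;

	printk("busy=%d desc=%u copied=%u\n", st.busy, idx,
	       (uint32_t)st.total_copied);
}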
@@ -418,8 +487,8 @@ static int dma_esp32_reload(const struct device *dev, uint32_t channel, uint32_t
 	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
 	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
 	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
-	struct dma_block_config block = {0};
-	int err = 0;
+	dma_descriptor_t *desc_iter = dma_channel->desc_list;
+	uint32_t buf;
 
 	if (channel >= config->dma_channel_max) {
 		LOG_ERR("Unsupported channel");
@@ -428,22 +497,40 @@ static int dma_esp32_reload(const struct device *dev, uint32_t channel, uint32_t
 
 	if (dma_channel->dir == DMA_RX) {
 		gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id);
-		block.block_size = size;
-		block.dest_address = dst;
-		err = dma_esp32_config_rx_descriptor(dma_channel, &block);
-		if (err) {
-			LOG_ERR("Error reloading RX channel (%d)", err);
-			return err;
-		}
+		buf = dst;
 	} else if (dma_channel->dir == DMA_TX) {
 		gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id);
-		block.block_size = size;
-		block.source_address = src;
-		err = dma_esp32_config_tx_descriptor(dma_channel, &block);
-		if (err) {
-			LOG_ERR("Error reloading TX channel (%d)", err);
-			return err;
+		buf = src;
+	} else {
+		return -EINVAL;
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(dma_channel->desc_list); ++i) {
+		memset(desc_iter, 0, sizeof(dma_descriptor_t));
+		desc_iter->buffer = (void *)(buf + DMA_DESCRIPTOR_BUFFER_MAX_SIZE * i);
+		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+		if (size < DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
+			desc_iter->dw0.size = size;
+			if (dma_channel->dir == DMA_TX) {
+				desc_iter->dw0.length = size;
+				desc_iter->dw0.suc_eof = 1;
+			}
+			desc_iter->next = NULL;
+			break;
+		}
+		desc_iter->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
+		if (dma_channel->dir == DMA_TX) {
+			desc_iter->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
 		}
+		size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
+		desc_iter->next = desc_iter + 1;
+		desc_iter += 1;
+	}
+
+	if (desc_iter->next) {
+		memset(desc_iter, 0, sizeof(dma_descriptor_t));
+		LOG_ERR("Not enough DMA descriptors. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
+		return -EINVAL;
 	}
 
 	return 0;
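
Since dma_esp32_reload() now builds its own descriptor chain, a channel can be re-armed with a buffer larger than one descriptor in a single call. A sketch assuming an already configured RX channel (buffer name and size are illustrative):

#include <zephyr/drivers/dma.h>

/* 10 KiB is split internally across ceil(10240 / 4095) = 3 descriptors. */
static uint8_t rx_buf[10240];	/* must reside in DMA-capable memory */

int rearm_rx(const struct device *dma_dev, uint32_t channel)
{
	/* for RX the driver uses only the dst address; src is ignored */
	int ret = dma_reload(dma_dev, channel, 0, (uint32_t)rx_buf,
			     sizeof(rx_buf));

	if (ret < 0) {
		return ret;
	}

	return dma_start(dma_dev, channel);
}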
@@ -497,7 +584,7 @@ static int dma_esp32_init(const struct device *dev)
 		dma_channel->cb = NULL;
 		dma_channel->dir = DMA_UNCONFIGURED;
 		dma_channel->periph_id = ESP_GDMA_TRIG_PERIPH_INVALID;
-		memset(&dma_channel->desc, 0, sizeof(dma_descriptor_t));
+		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
 	}
 
 	gdma_hal_init(&data->hal, 0);
@@ -593,7 +680,7 @@ static void *irq_handlers[] = {
 			.dev = (gdma_dev_t *)DT_INST_REG_ADDR(idx),                                \
 		},                                                                                 \
 	};                                                                                         \
-												   \
+                                                                                                   \
 	DEVICE_DT_INST_DEFINE(idx, &dma_esp32_init, NULL, &dma_data_##idx, &dma_config_##idx,     \
 			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &dma_esp32_api);
