/* Copyright (c) 2025 Jamie Smith
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MBED_OS_GENERICETHDMA_H
#define MBED_OS_GENERICETHDMA_H
#include "CompositeEMAC.h"
#include "mbed_trace.h"
#include "CacheAlignedBuffer.h"
#include "mbed_critical.h"
#include <atomic>
#include <optional>
#define TRACE_GROUP "GEDMA"
namespace mbed {
/**
* @brief Generic transmit DMA loop
*
* This implementation of Tx DMA should work for the large majority of embedded MCUs that use a DMA ring-based
* ethernet MAC.
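*
* A target port subclasses this ring and implements the pure virtual hooks below that touch the hardware
* descriptor ring. As a rough, hedged sketch (the class name MyTargetTxDMA is hypothetical, not part of this API;
* the overrides mirror the hooks declared in this class):
*
* @code
* class MyTargetTxDMA : public mbed::GenericTxDMARing { // hypothetical target-specific subclass
* protected:
*     void startDMA() override;  // point the hardware at the descriptor ring and enable Tx DMA
*     void stopDMA() override;   // disable Tx DMA
*     bool descOwnedByDMA(size_t descIdx) override;  // read the OWN bit of descriptor descIdx
*     bool isDMAReadableBuffer(uint8_t const * start, size_t size) const override;
*     // Fill the descriptor, flush it from cache if needed, set the OWN bit, and wake the DMA.
*     void giveToDMA(size_t descIdx, uint8_t const * buffer, size_t len, bool firstDesc, bool lastDesc) override;
*     // Plus cacheInvalidateDescriptor(size_t) on targets with a data cache.
* };
* @endcode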
*/
class GenericTxDMARing : public CompositeEMAC::TxDMA
{
protected:
/// Number of entries in the Tx descriptor ring
static constexpr size_t TX_NUM_DESCS = MBED_CONF_NSAPI_EMAC_TX_NUM_DESCS;
/// Extra, unfilled Tx descs to leave in the DMA ring at all times.
/// This is used to support Eth MACs that don't allow enqueuing every single descriptor at the same time.
const size_t extraTxDescsToLeave;
/// Pointer to first memory buffer in the chain associated with descriptor n.
/// The buffer address shall only be set for the *last* descriptor, so that the entire chain is freed
/// when the last descriptor is returned.
std::array<net_stack_mem_buf_t *, TX_NUM_DESCS> descStackBuffers{};
/// EventFlag used to signal when a Tx descriptor becomes available
rtos::EventFlags txDescAvailFlag;
// Indexes for descriptor rings.
// NOTE: when working with these indices, it's important to consider the case where e.g. the send and reclaim indexes are
// equal. This could mean *either* that the Tx ring is completely full of data, or that the Tx ring is empty.
// To resolve this ambiguity, we maintain separate count variables that track how many entries are in the ring at present.
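// For example, with TX_NUM_DESCS == 4 and txSendIndex == txReclaimIndex == 2, the ring is empty when
// txDescsOwnedByApplication == 4 and completely full when txDescsOwnedByApplication == 0.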
size_t txSendIndex; ///< Index of the next Tx descriptor that can be filled with data
std::atomic<size_t> txDescsOwnedByApplication; ///< Number of Tx descriptors owned by the application. Decremented by txPacket() and incremented by reclaimTxDescs()
size_t txReclaimIndex; ///< Index of the next Tx descriptor that will be reclaimed by the mac thread calling reclaimTxDescs().
/// Construct, passing a value for extraTxDescsToLeave
GenericTxDMARing(size_t extraTxDescsToLeave = 0):
extraTxDescsToLeave(extraTxDescsToLeave)
{}
/// Configure DMA registers to point to the DMA ring,
/// and enable DMA. This is done before the MAC itself is enabled.
virtual void startDMA() = 0;
/// Stop the DMA running. This is done after MAC transmit & receive are disabled.
virtual void stopDMA() = 0;
#if __DCACHE_PRESENT
/// Invalidate cache for the descriptor at the given index so it gets reloaded from main memory
virtual void cacheInvalidateDescriptor(size_t descIdx) = 0;
#endif
/// Is the given descriptor owned by DMA?
/// Note that the descriptor will already have been invalidated in cache if needed.
virtual bool descOwnedByDMA(size_t descIdx) = 0;
/// Get whether the given buffer is in a memory region readable by the Ethernet DMA.
/// If this returns false for a buffer being transmitted, the buffer will be copied into a new
/// heap-allocated buffer.
virtual bool isDMAReadableBuffer(uint8_t const * start, size_t size) const = 0;
/// Give the descriptor at the given index to DMA to be transmitted with the given buffer.
/// Note: if the descriptor needs to be flushed from CPU cache, you need to do that
/// at the correct point in the implementation of this method!
/// Also, if the DMA ran out of data to transmit, you may need to do a "poke"/"wake" operation
/// to tell it to start running again.
virtual void giveToDMA(size_t descIdx, uint8_t const * buffer, size_t len, bool firstDesc, bool lastDesc) = 0;
// Utility function for implementing isDMAReadableBuffer().
// 1D intersection test between a buffer and a memory bank.
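// For example, a port whose Ethernet DMA cannot read from a particular RAM bank might (roughly) implement
// isDMAReadableBuffer() by rejecting anything overlapping that bank; the 0x20000000 / 128 kB figures below are
// placeholders for illustration, not values for any particular target:
//
//   bool isDMAReadableBuffer(uint8_t const * start, size_t size) const override {
//       // Reject buffers that touch the (hypothetical) DMA-inaccessible bank
//       return !bufferTouchesMemoryBank(start, size, 0x20000000, 128 * 1024);
//   }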
static bool bufferTouchesMemoryBank(uint8_t const * start, const size_t size, const uint32_t bankStartAddr, const uint32_t bankSize) {
const auto startAddrInt = reinterpret_cast<uint32_t>(start);
if(startAddrInt < bankStartAddr) {
// Case 1: buffer begins before bank
return (startAddrInt + size) > bankStartAddr;
}
else if(startAddrInt >= bankStartAddr && startAddrInt < (bankStartAddr + bankSize)) {
// Case 2: buffer begins inside bank
return true;
}
else {
// Case 3: buffer begins after bank
return false;
}
}
public:
CompositeEMAC::ErrCode init() override {
// At the start, we own all the descriptors
txDescsOwnedByApplication = TX_NUM_DESCS;
// Next descriptor will be descriptor 0
txSendIndex = 0;
txReclaimIndex = 0;
startDMA();
return CompositeEMAC::ErrCode::SUCCESS;
}
CompositeEMAC::ErrCode deinit() override {
stopDMA();
// Deallocate all buffers currently assigned to the DMA ring
for(auto & buf_addr : descStackBuffers) {
if(buf_addr != nullptr) {
memory_manager->free(buf_addr);
buf_addr = nullptr;
}
}
return CompositeEMAC::ErrCode::SUCCESS;
}
bool reclaimTxDescs() override {
bool returnedAnyDescriptors = false;
while (true)
{
if (txReclaimIndex == txSendIndex && txDescsOwnedByApplication > 0) {
// If we have reached the Tx send index, we want to stop iterating as this is
// the next descriptor that has not been populated by the application yet.
// The only exception is if the Tx ring is completely full, in which case we want
// to process the entire ring. In the case where the Tx ring is full,
// txDescsOwnedByApplication will be 0.
// Note that txSendIndex and txDescsOwnedByApplication are updated in a critical
// section so their values will always be in sync with each other.
break;
}
#if __DCACHE_PRESENT
cacheInvalidateDescriptor(txReclaimIndex);
#endif
if (descOwnedByDMA(txReclaimIndex)) {
// This desc is owned by the DMA, so we have reached the part of the ring buffer
// that is still being transmitted.
// Done for now!
break;
}
// Free any buffers associated with the descriptor
if (descStackBuffers[txReclaimIndex] != nullptr) {
memory_manager->free(descStackBuffers[txReclaimIndex]);
descStackBuffers[txReclaimIndex] = nullptr;
}
// Update counters
tr_debug("Reclaimed descriptor %zu", txReclaimIndex);
txReclaimIndex = (txReclaimIndex + 1) % TX_NUM_DESCS;
++txDescsOwnedByApplication;
returnedAnyDescriptors = true;
}
if(returnedAnyDescriptors) {
txDescAvailFlag.set(1);
}
return returnedAnyDescriptors;
}
CompositeEMAC::ErrCode txPacket(net_stack_mem_buf_t * buf) override {
// Step 1: Figure out if we can send this zero-copy, or if we need to copy it.
size_t packetDescsUsed = memory_manager->count_buffers(buf);
size_t neededFreeDescs = packetDescsUsed + extraTxDescsToLeave;
bool needToCopy = false;
if(neededFreeDescs >= TX_NUM_DESCS)
{
// Packet uses too many buffers, we have to copy it into a continuous buffer.
// Note: Some Eth DMAs (e.g. STM32 v2) cannot enqueue all the descs in the ring at the same time
// so we can't use every single descriptor to send the packet.
needToCopy = true;
}
if(!needToCopy && (neededFreeDescs > txDescsOwnedByApplication && txDescsOwnedByApplication > extraTxDescsToLeave)) {
// Packet uses more buffers than we have descriptors, but we can send it immediately if we copy
// it into a single buffer.
needToCopy = true;
}
if(!needToCopy) {
net_stack_mem_buf_t * currBuf = buf;
while(currBuf != nullptr) {
// If this buffer is passed down direct from the application, we will need to
// copy the packet.
if(memory_manager->get_lifetime(currBuf) == NetStackMemoryManager::Lifetime::VOLATILE)
{
needToCopy = true;
break;
}
// Or, if the buffer is in DMA-inaccessible RAM, we will need to copy it
if(!isDMAReadableBuffer(static_cast<uint8_t *>(memory_manager->get_ptr(currBuf)), memory_manager->get_len(currBuf))) {
needToCopy = true;
break;
}
currBuf = memory_manager->get_next(currBuf);
}
}
tr_debug("Transmitting packet of length %lu in %zu buffers and %zu descs\n",
memory_manager->get_total_len(buf), memory_manager->count_buffers(buf), packetDescsUsed);
// Step 2: Copy packet if needed
if(needToCopy)
{
auto * newBuf = memory_manager->alloc_heap(memory_manager->get_total_len(buf), 0);
if(newBuf == nullptr)
{
// No free memory, drop packet
return CompositeEMAC::ErrCode::OUT_OF_MEMORY;
}
// We should have gotten just one contiguous buffer
MBED_ASSERT(memory_manager->get_next(newBuf) == nullptr);
packetDescsUsed = 1;
neededFreeDescs = packetDescsUsed + extraTxDescsToLeave;
// Copy data over
memory_manager->copy_from_buf(static_cast<uint8_t *>(memory_manager->get_ptr(newBuf)), memory_manager->get_len(newBuf), buf);
memory_manager->free(buf);
buf = newBuf;
}
// Step 3: Wait for needed amount of buffers to be available.
// Note that, in my experience, it's better to block here, as dropping the packet
// due to not having enough buffers can create weird effects when the application sends
// lots of packets at once.
while(txDescsOwnedByApplication < neededFreeDescs)
{
txDescAvailFlag.wait_any_for(1, rtos::Kernel::wait_for_u32_forever);
}
// Step 4: Load buffer into descriptors and send
net_stack_mem_buf_t * currBuf = buf;
for(size_t descCount = 0; descCount < packetDescsUsed; descCount++)
{
#if __DCACHE_PRESENT
// Write buffer back to main memory
SCB_CleanDCache_by_Addr(memory_manager->get_ptr(currBuf), memory_manager->get_len(currBuf));
#endif
// Get next buffer
const auto nextBuf = memory_manager->get_next(currBuf);
if(nextBuf == nullptr)
{
// Last descriptor, store head buffer address for freeing
descStackBuffers[txSendIndex] = buf;
}
else
{
descStackBuffers[txSendIndex] = nullptr;
}
// Get the pointer and length of the packet because this might not be doable in a critical section
const auto bufferPtr = static_cast<uint8_t *>(memory_manager->get_ptr(currBuf));
const auto bufferLen = memory_manager->get_len(currBuf);
// Enter a critical section, because we could run into weird corner cases if the
// interrupt executes while we are half done configuring this descriptor and updating
// the counters.
core_util_critical_section_enter();
// Configure settings.
giveToDMA(txSendIndex, bufferPtr, bufferLen, descCount == 0, nextBuf == nullptr);
// Update descriptor count and index
--txDescsOwnedByApplication;
txSendIndex = (txSendIndex + 1) % TX_NUM_DESCS;
core_util_critical_section_exit();
// Move to next buffer
currBuf = nextBuf;
}
return CompositeEMAC::ErrCode::SUCCESS;
}
};
/**
* @brief Generic receive DMA loop
*
* This implementation of Rx DMA should work for the large majority of embedded MCUs that use a DMA ring-based
* ethernet MAC.
*
* The subclass must allocate the DMA descriptors, and all access to them is done through virtual functions
* that the subclass must override.
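*
* As with the Tx ring, a rough, hedged sketch of a port might look like the following (MyTargetRxDMA is a
* hypothetical name; the overrides mirror the pure virtual hooks declared below):
*
* @code
* class MyTargetRxDMA : public mbed::GenericRxDMARing { // hypothetical target-specific subclass
* protected:
*     void startDMA() override;  // point the hardware at the descriptor ring and enable Rx DMA
*     void stopDMA() override;   // disable Rx DMA
*     bool descOwnedByDMA(size_t descIdx) override;
*     bool isFirstDesc(size_t descIdx) override;  // e.g. FS bit
*     bool isLastDesc(size_t descIdx) override;   // e.g. LS bit
*     bool isErrorDesc(size_t descIdx) override;
*     void returnDescriptor(size_t descIdx, uint8_t * buffer) override;
*     size_t getTotalLen(size_t firstDescIdx, size_t lastDescIdx) override;
*     // Plus cacheInvalidateDescriptor(size_t) on targets with a data cache.
* };
* @endcode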
*/
class GenericRxDMARing : public CompositeEMAC::RxDMA {
protected:
/// How many extra buffers to leave in the Rx pool, relative to how many we keep assigned to Rx descriptors.
/// We want to keep some amount of extra buffers because constantly hitting the network stack with failed pool
/// allocations can produce some negative consequences in some cases.
static constexpr size_t RX_POOL_EXTRA_BUFFERS = 3;
/// Number of entries in the Rx descriptor ring
/// Note: + 1 because for some EMACs (STM32 v2) we have to always keep one descriptor owned by the application
// TODO: When we add multiple Ethernet support, this calculation may need to be changed, because the pool buffers will be split between multiple EMACs
static constexpr size_t RX_NUM_DESCS = MBED_CONF_NSAPI_EMAC_RX_POOL_NUM_BUFS - RX_POOL_EXTRA_BUFFERS + 1;
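// For example, with a (hypothetical) pool of 8 buffers this gives 8 - 3 + 1 = 6 descriptors; since at least one
// descriptor is always kept application-owned, at most 5 pool buffers are attached to the ring at once, leaving
// at least RX_POOL_EXTRA_BUFFERS = 3 buffers free for the network stack.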
/// Pointer to the network stack buffer associated with the corresponding Rx descriptor.
net_stack_mem_buf_t * rxDescStackBufs[RX_NUM_DESCS];
// Indexes for descriptor rings.
size_t rxBuildIndex; ///< Index of the next Rx descriptor that needs to be built. Updated by application and used by ISR.
size_t rxDescsOwnedByApplication; ///< Number of Rx descriptors owned by the application and needing buffers allocated.
std::atomic<size_t> rxNextIndex; ///< Index of the next descriptor that the DMA will populate. Updated by application but used by ISR.
// Alignment required for Rx memory buffers. Normally they don't need more than word alignment but
// if we are doing cache operations they need to be cache aligned.
#if __DCACHE_PRESENT
static constexpr size_t RX_BUFFER_ALIGN = __SCB_DCACHE_LINE_SIZE;
#else
static constexpr size_t RX_BUFFER_ALIGN = sizeof(uint32_t);
#endif
/// Payload size of buffers allocated from the Rx pool. This is the allocation unit size
/// of the pool minus any overhead needed for alignment.
size_t rxPoolPayloadSize;
/// Constructor. Subclass must allocate descriptor array of size RX_NUM_DESCS
GenericRxDMARing() = default;
/// Configure DMA registers to point to the DMA ring,
/// and enable DMA. This is done before the MAC itself is enabled, and before any descriptors
/// are given to DMA.
virtual void startDMA() = 0;
/// Stop the DMA running. This is done after MAC transmit & receive are disabled.
virtual void stopDMA() = 0;
#if __DCACHE_PRESENT
/// Invalidate cache for the descriptor at the given index so it gets reloaded from main memory
virtual void cacheInvalidateDescriptor(size_t descIdx) = 0;
#endif
/// Is the given descriptor owned by DMA?
/// Note that the descriptor will already have been invalidated in cache if needed.
virtual bool descOwnedByDMA(size_t descIdx) = 0;
/// Does the given descriptor contain the start of a packet?
/// Note that the descriptor will already have been invalidated in cache if needed.
virtual bool isFirstDesc(size_t descIdx) = 0;
/// Does the given descriptor contain the end of a packet?
/// Note that the descriptor will already have been invalidated in cache if needed.
virtual bool isLastDesc(size_t descIdx) = 0;
/// Is the given descriptor an error descriptor?
/// Note that the descriptor will already have been invalidated in cache if needed.
virtual bool isErrorDesc(size_t descIdx) = 0;
/// Return a descriptor to DMA so that DMA can receive into it.
/// Is passed the buffer address (fixed size equal to rxPoolPayloadSize) to attach to this descriptor.
/// Note: if the descriptor needs to be flushed from CPU cache, you need to do that
/// at the correct point in the implementation of this method!
/// Also, if the DMA ran out of descriptors to receive into, you may need to do a "poke"/"wake" operation
/// to tell it to start running again.
virtual void returnDescriptor(size_t descIdx, uint8_t * buffer) = 0;
/// Get the length of the packet starting at firstDescIdx and continuing until
/// lastDescIdx (which might or might not be the same as firstDescIdx). Descriptors have already been
/// validated to contain a complete packet at this point.
virtual size_t getTotalLen(size_t firstDescIdx, size_t lastDescIdx) = 0;
public:
CompositeEMAC::ErrCode init() override {
rxPoolPayloadSize = memory_manager->get_pool_alloc_unit(RX_BUFFER_ALIGN);
rxBuildIndex = 0;
rxNextIndex = 0;
// At the start, we own all the descriptors
rxDescsOwnedByApplication = RX_NUM_DESCS;
// init DMA peripheral
startDMA();
// Build all descriptors
rebuildDescriptors();
return CompositeEMAC::ErrCode::SUCCESS;
}
CompositeEMAC::ErrCode deinit() override {
stopDMA();
// Deallocate buffers associated with all descriptors
for(size_t descIdx = 0; descIdx < RX_NUM_DESCS; ++descIdx) {
if(rxDescStackBufs[descIdx] != nullptr) {
memory_manager->free(rxDescStackBufs[descIdx]);
}
}
return CompositeEMAC::ErrCode::SUCCESS;
}
void rebuildDescriptors() override {
const size_t origRxDescsOwnedByApplication [[maybe_unused]] = rxDescsOwnedByApplication;
// Note: With some Ethernet peripherals, you can never give back every single descriptor to
// the hardware, because then it thinks there are 0 descriptors left.
while (rxDescsOwnedByApplication > 1) {
// Allocate new buffer
auto *const buffer = memory_manager->alloc_pool(rxPoolPayloadSize, RX_BUFFER_ALIGN);
if (buffer == nullptr) {
// No memory, cannot return any more descriptors.
return;
}
// Store buffer address
rxDescStackBufs[rxBuildIndex] = buffer;
// Send descriptor to DMA
returnDescriptor(rxBuildIndex, static_cast<uint8_t *>(memory_manager->get_ptr(buffer)));
// Move to next descriptor
--rxDescsOwnedByApplication;
rxBuildIndex = (rxBuildIndex + 1) % RX_NUM_DESCS;
}
tr_debug("buildRxDescriptors(): Returned %zu descriptors.", origRxDescsOwnedByApplication - rxDescsOwnedByApplication);
}
bool rxHasPackets_ISR() override {
// First, we need to check if at least one DMA descriptor that is owned by the application
// has its last descriptor flag or error flag set, indicating we have received at least one complete packet
// or there is an error descriptor that can be reclaimed by the application.
// Note that we want to bias towards false positives here, because false positives just waste CPU time,
// while false negatives would cause packets to be dropped.
// So, for simplicity, we just check every descriptor currently owned by the application until we
// find one with the FS bit set or the error bits set.
// This could potentially produce a false positive if we do this in the middle of receiving
// an existing packet, but that is unlikely and will not cause anything bad to happen if it does.
bool seenFirstDesc = false;
for(size_t descCount = 0; descCount < RX_NUM_DESCS; descCount++)
{
size_t descIdx = (rxNextIndex + descCount) % RX_NUM_DESCS;
#if __DCACHE_PRESENT
cacheInvalidateDescriptor(descIdx);
#endif
if(descOwnedByDMA(descIdx))
{
// Descriptor owned by DMA. We are out of descriptors to process.
return false;
}
if(isFirstDesc(descIdx))
{
if(seenFirstDesc)
{
// First desc seen after another first desc.
// Some MACs do this if they run out of Rx descs when halfway through a packet.
// dequeuePacket() can clean this up and reclaim the partial packet desc(s).
return true;
}
else
{
seenFirstDesc = true;
}
}
if(isErrorDesc(descIdx) || isLastDesc(descIdx))
{
// Reclaimable descriptor or complete packet detected.
return true;
}
}
// Processed all descriptors.
return false;
}
private:
/// Helper function: Discard received Rx descriptors from a given start index (inclusive) to stop index (exclusive)
void discardRxDescs(size_t startIdx, size_t stopIdx)
{
for(size_t descToCleanIdx = startIdx; descToCleanIdx != stopIdx; descToCleanIdx = (descToCleanIdx + 1) % RX_NUM_DESCS) {
// Free Rx buffer attached to this desc
memory_manager->free(rxDescStackBufs[descToCleanIdx]);
rxDescStackBufs[descToCleanIdx] = nullptr;
// Allow desc to be rebuilt
++rxDescsOwnedByApplication;
rxNextIndex = (rxNextIndex + 1) % RX_NUM_DESCS;
}
}
public:
net_stack_mem_buf_t * dequeuePacket() override {
// Indices of the first and last descriptors for the packet will be saved here
std::optional<size_t> firstDescIdx, lastDescIdx;
// Packet length is stored here once we check it
size_t pktLen = 0;
// Prevent looping around into descriptors waiting for rebuild by limiting how many
// we can process.
const size_t maxDescsToProcess = RX_NUM_DESCS - rxDescsOwnedByApplication;
const size_t startIdx = rxNextIndex;
for (size_t descCount = 0; descCount < maxDescsToProcess && !lastDescIdx.has_value(); descCount++) {
size_t descIdx = (startIdx + descCount) % RX_NUM_DESCS;
#if __DCACHE_PRESENT
cacheInvalidateDescriptor(descIdx);
#endif
if (descOwnedByDMA(descIdx)) {
// Descriptor owned by DMA and has not been filled in yet. We are out of descriptors to process.
break;
}
const bool isError = isErrorDesc(descIdx);
const bool isFirst = isFirstDesc(descIdx);
const bool isLast = isLastDesc(descIdx);
if (!firstDescIdx.has_value() && (isError || !isFirst)) {
// Error or non-first-descriptor before a first descriptor
// (could be caused by incomplete packets/junk in the DMA buffer).
// Ignore, free associated memory, and schedule for rebuild.
discardRxDescs(descIdx, (descIdx + 1) % RX_NUM_DESCS);
continue;
}
else if(firstDescIdx.has_value() && isError)
{
// Already seen a first descriptor, but we have an error descriptor.
// So, delete the in-progress packet up to this point.
discardRxDescs(*firstDescIdx, (descIdx + 1) % RX_NUM_DESCS);
firstDescIdx.reset();
continue;
}
else if(firstDescIdx.has_value() && isFirst)
{
// Already seen a first descriptor, but we have another first descriptor.
// Some MACs do this if they run out of Rx descs when halfway through a packet.
// Delete the in-progress packet up to this point and start over from descIdx.
discardRxDescs(*firstDescIdx, descIdx);
firstDescIdx = descIdx;
}
else if(isFirst)
{
// Normal first descriptor.
firstDescIdx = descIdx;
}
if(isLast) {
pktLen = getTotalLen(*firstDescIdx, descIdx);
lastDescIdx = descIdx;
}
}
if (!lastDescIdx.has_value()) {
// No complete packet identified.
// Take the chance to rebuild any available descriptors, then return.
rebuildDescriptors();
tr_debug("No complete packets in Rx descs\n");
return nullptr;
}
// We will receive next into the descriptor after this one.
// Update this now to tell the ISR to search for descriptors after lastDescIdx only.
rxNextIndex = (*lastDescIdx + 1) % RX_NUM_DESCS;
// Set length of first buffer
net_stack_mem_buf_t *const headBuffer = rxDescStackBufs[*firstDescIdx];
memory_manager->set_len(headBuffer, std::min(pktLen, rxPoolPayloadSize));
size_t lenRemaining = pktLen - std::min(pktLen, rxPoolPayloadSize);
// Iterate through the subsequent descriptors in this packet and link the buffers
// Note that this also transfers ownership of subsequent buffers to the first buffer,
// so if the first buffer is deleted, the others will be as well.
++rxDescsOwnedByApplication; // for first buffer
rxDescStackBufs[*firstDescIdx] = nullptr;
for (size_t descIdx = (*firstDescIdx + 1) % RX_NUM_DESCS;
descIdx != (*lastDescIdx + 1) % RX_NUM_DESCS;
descIdx = (descIdx + 1) % RX_NUM_DESCS) {
// We have to set the buffer length first before concatenating it to the chain
MBED_ASSERT(lenRemaining > 0);
memory_manager->set_len(rxDescStackBufs[descIdx], std::min(lenRemaining, rxPoolPayloadSize));
lenRemaining -= std::min(lenRemaining, rxPoolPayloadSize);
memory_manager->cat(headBuffer, rxDescStackBufs[descIdx]);
rxDescStackBufs[descIdx] = nullptr;
++rxDescsOwnedByApplication;
}
// Invalidate cache for all data buffers, as these were written by the DMA to main memory
#if __DCACHE_PRESENT
auto * bufToInvalidate = headBuffer;
while(bufToInvalidate != nullptr)
{
SCB_InvalidateDCache_by_Addr(memory_manager->get_ptr(bufToInvalidate), rxPoolPayloadSize);
bufToInvalidate = memory_manager->get_next(bufToInvalidate);
}
#endif
tr_debug("Returning packet of length %lu, start %p from Rx descriptors %zu-%zu\n",
memory_manager->get_total_len(headBuffer), memory_manager->get_ptr(headBuffer), *firstDescIdx, *lastDescIdx);
return headBuffer;
}
};
}
#undef TRACE_GROUP
#endif //MBED_OS_GENERICETHDMA_H