Commit ed258e9

Andy Ross authored and nashif committed
lib/os/heap: Add sys_heap_aligned_alloc()
Add support for a C11-style aligned_alloc() in the heap implementation.
This is properly optimized, in the sense that unused prefix/suffix data
around the chosen allocation is returned to the heap and made available
for general allocation.

Signed-off-by: Andy Ross <[email protected]>
1 parent 1f29dd3 commit ed258e9

File tree

4 files changed, +89 -1 lines changed


include/sys/sys_heap.h

Lines changed: 15 additions & 0 deletions
@@ -88,6 +88,21 @@ void sys_heap_init(struct sys_heap *h, void *mem, size_t bytes);
  */
 void *sys_heap_alloc(struct sys_heap *h, size_t bytes);
 
+/** @brief Allocate aligned memory from a sys_heap
+ *
+ * Behaves in all ways like sys_heap_alloc(), except that the returned
+ * memory (if available) will have a starting address in memory which
+ * is a multiple of the specified power-of-two alignment value in
+ * bytes.  The resulting memory can be returned to the heap using
+ * sys_heap_free().
+ *
+ * @param h Heap from which to allocate
+ * @param align Alignment in bytes, must be a power of two
+ * @param bytes Number of bytes requested
+ * @return Pointer to memory the caller can now use
+ */
+void *sys_heap_aligned_alloc(struct sys_heap *h, size_t align, size_t bytes);
+
 /** @brief Free memory into a sys_heap
  *
  * De-allocates a pointer to memory previously returned from
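As a usage sketch (not part of the diff): with CONFIG_SYS_HEAP_ALIGNED_ALLOC enabled, the new call behaves like sys_heap_alloc() plus an alignment guarantee. The include path, buffer size, alignment, and request size below are illustrative assumptions.

#include <sys/sys_heap.h>

static char heap_mem[2048];    /* backing storage; size chosen arbitrarily */
static struct sys_heap heap;

void aligned_alloc_example(void)
{
	sys_heap_init(&heap, heap_mem, sizeof(heap_mem));

	/* Request 100 bytes starting on a 64-byte boundary */
	void *p = sys_heap_aligned_alloc(&heap, 64, 100);

	if (p != NULL) {
		/* p is a multiple of 64; release it like any other block */
		sys_heap_free(&heap, p);
	}
}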

lib/os/Kconfig

Lines changed: 10 additions & 0 deletions
@@ -46,4 +46,14 @@ config SYS_HEAP_ALLOC_LOOPS
 	  keeps the maximum runtime at a tight bound so that the heap
 	  is useful in locked or ISR contexts.
 
+config SYS_HEAP_ALIGNED_ALLOC
+	bool "Enable sys_heap_aligned_alloc() API"
+	help
+	  When true, the sys_heap_aligned_alloc() API is available to
+	  guarantee alignment of returned heap blocks in an efficient
+	  way.  For technical reasons, this requires the use of the
+	  "big" 8 byte heap block header format, so it will moderately
+	  increase heap memory overhead on 32 bit platforms when using
+	  small (<256kb) heaps.
+
 endmenu
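Applications opt in by setting CONFIG_SYS_HEAP_ALIGNED_ALLOC=y in their configuration (e.g. prj.conf). As a rough illustration of the overhead note above, assuming the small-heap format uses a 4 byte chunk header (an implementation detail not shown in this commit): switching every chunk to the 8 byte "big" header costs roughly 4 extra bytes per block on a 32 bit platform whose heap is smaller than 256kb.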

lib/os/heap.c

Lines changed: 62 additions & 0 deletions
@@ -21,9 +21,19 @@ static void *chunk_mem(struct z_heap *h, chunkid_t c)
 	return ret;
 }
 
+static inline bool solo_free_header(struct z_heap *h, chunkid_t c)
+{
+	return (IS_ENABLED(CONFIG_SYS_HEAP_ALIGNED_ALLOC)
+		&& chunk_size(h, c) == 1);
+}
+
 static void free_list_remove(struct z_heap *h, int bidx,
 			     chunkid_t c)
 {
+	if (solo_free_header(h, c)) {
+		return;
+	}
+
 	struct z_heap_bucket *b = &h->buckets[bidx];
 
 	CHECK(!chunk_used(h, c));
@@ -46,6 +56,10 @@ static void free_list_remove(struct z_heap *h, int bidx,
 
 static void free_list_add(struct z_heap *h, chunkid_t c)
 {
+	if (solo_free_header(h, c)) {
+		return;
+	}
+
 	int bi = bucket_idx(h, chunk_size(h, c));
 
 	if (h->buckets[bi].next == 0) {
@@ -231,6 +245,54 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
 	return chunk_mem(heap->heap, c);
 }
 
+void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
+{
+	struct z_heap *h = heap->heap;
+
+	CHECK((align & (align - 1)) == 0);
+	CHECK(big_heap(h));
+	if (bytes == 0) {
+		return NULL;
+	}
+
+	/* Find a free block that is guaranteed to fit */
+	size_t chunksz = bytes_to_chunksz(h, bytes);
+	size_t mask = (align / CHUNK_UNIT) - 1;
+	size_t padsz = MAX(CHUNK_UNIT, chunksz + mask);
+	chunkid_t c0 = alloc_chunks(h, padsz);
+
+	if (c0 == 0) {
+		return NULL;
+	}
+
+	/* Align within memory, using "chunk index" units.  Remember
+	 * the block we're aligning starts in the chunk AFTER the
+	 * header!
+	 */
+	size_t c0i = ((size_t) &chunk_buf(h)[c0 + 1]) / CHUNK_UNIT;
+	size_t ci = ((c0i + mask) & ~mask);
+	chunkid_t c = c0 + (ci - c0i);
+
+	CHECK(c >= c0 && c < c0 + padsz);
+	CHECK((((size_t) chunk_mem(h, c)) & (align - 1)) == 0);
+
+	/* Split and free unused prefix */
+	if (c > c0) {
+		split_chunks(h, c0, c);
+		set_chunk_used(h, c, true);
+		free_chunks(h, c0);
+	}
+
+	/* Split and free unused suffix */
+	if (chunksz < chunk_size(h, c)) {
+		split_chunks(h, c, c + chunksz);
+		set_chunk_used(h, c, true);
+		free_chunks(h, c + chunksz);
+	}
+
+	return chunk_mem(h, c);
+}
+
 void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
 {
 	/* Must fit in a 32 bit count of HUNK_UNIT */
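A worked example of the alignment arithmetic above, assuming CHUNK_UNIT is 8 bytes (the exact value is an implementation detail of the heap, not shown in this diff): for align = 64, mask = 64/8 - 1 = 7 and padsz = chunksz + 7, so the over-sized allocation is guaranteed to contain a 64 byte boundary. If the memory following c0's header starts at chunk index c0i = 1003, then ci = (1003 + 7) & ~7 = 1008 and c = c0 + 5; the five-chunk prefix is split off and freed back to the heap, and any chunks past chunksz are likewise split off and freed as the suffix, which is the "properly optimized" behavior described in the commit message.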

lib/os/heap.h

Lines changed: 2 additions & 1 deletion
@@ -66,7 +66,8 @@ struct z_heap {
 
 static inline bool big_heap_chunks(size_t chunks)
 {
-	return sizeof(void *) > 4 || chunks > 0x7fff;
+	return IS_ENABLED(CONFIG_SYS_HEAP_ALIGNED_ALLOC)
+		|| sizeof(void *) > 4 || chunks > 0x7fff;
 }
 
 static inline bool big_heap_bytes(size_t bytes)
