
Commit 1f29dd3

Andy Ross authored and nashif committed
lib/os/heap: General refactoring
Miscellaneous refactoring and simplification. No behavioral changes:

Make split_alloc() take and return chunk IDs and not memory pointers, leaving the conversion between memory/chunks the job of the higher level sys_heap_alloc() API. This cleans up the internals for code that wants to do allocation but has its own ideas about what to do with the resulting chunks.

Add split_chunks() and merge_chunks() utilities to own the linear/size pointers and have split_alloc() and free_chunks() use them instead of doing the list management directly.

Signed-off-by: Andy Ross <[email protected]>
1 parent 48f43b5 commit 1f29dd3
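
In sketch form, the new division of labor in the allocation path looks like this (condensed from the rewritten functions in the diff below; the comments are added here for illustration and are not part of the commit):

/* Internal helpers now deal purely in chunk IDs; only the public
 * wrapper converts byte counts to chunk sizes and chunk IDs to
 * pointers.  chunk_mem() maps chunk 0 to NULL, so a failed
 * alloc_chunks() propagates as a NULL return with no special case.
 */
void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
{
        if (bytes == 0) {
                return NULL;
        }
        size_t chunksz = bytes_to_chunksz(heap->heap, bytes);
        chunkid_t c = alloc_chunks(heap->heap, chunksz); /* 0 on failure */

        return chunk_mem(heap->heap, c);                 /* NULL if c == 0 */
}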

File tree: 1 file changed (+76, −47 lines)


lib/os/heap.c

Lines changed: 76 additions & 47 deletions
@@ -9,6 +9,10 @@
 
 static void *chunk_mem(struct z_heap *h, chunkid_t c)
 {
+        if (c == 0) {
+                return NULL;
+        }
+
         chunk_unit_t *buf = chunk_buf(h);
         uint8_t *ret = ((uint8_t *)&buf[c]) + chunk_header_bytes(h);
 
@@ -66,10 +70,37 @@ static void free_list_add(struct z_heap *h, chunkid_t c)
         }
 }
 
+/* Splits a chunk "lc" into a left chunk and a right chunk at "rc".
+ * Leaves both chunks marked "free"
+ */
+static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
+{
+        CHECK(rc > lc);
+        CHECK(rc - lc < chunk_size(h, lc));
+
+        size_t sz0 = chunk_size(h, lc);
+        size_t lsz = rc - lc;
+        size_t rsz = sz0 - lsz;
+
+        set_chunk_size(h, lc, lsz);
+        set_chunk_size(h, rc, rsz);
+        set_left_chunk_size(h, rc, lsz);
+        set_left_chunk_size(h, right_chunk(h, rc), rsz);
+}
+
+/* Does not modify free list */
+static void merge_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
+{
+        size_t newsz = chunk_size(h, lc) + chunk_size(h, rc);
+
+        set_chunk_size(h, lc, newsz);
+        set_left_chunk_size(h, right_chunk(h, rc), newsz);
+}
+
 /* Allocates (fit check has already been perfomred) from the next
  * chunk at the specified bucket level
  */
-static void *split_alloc(struct z_heap *h, int bidx, size_t sz)
+static chunkid_t split_alloc(struct z_heap *h, int bidx, size_t sz)
 {
         CHECK(h->buckets[bidx].next != 0
               && sz <= chunk_size(h, h->buckets[bidx].next));
@@ -79,32 +110,44 @@ static void *split_alloc(struct z_heap *h, int bidx, size_t sz)
         free_list_remove(h, bidx, c);
 
         /* Split off remainder if it's usefully large */
-        size_t rem = chunk_size(h, c) - sz;
+        if ((chunk_size(h, c) - sz) >= (big_heap(h) ? 2 : 1)) {
+                split_chunks(h, c, c + sz);
+                free_list_add(h, c + sz);
+        }
+
+        set_chunk_used(h, c, true);
+        return c;
+}
 
-        CHECK(rem < h->len);
+static void free_chunks(struct z_heap *h, chunkid_t c)
+{
+        set_chunk_used(h, c, false);
 
-        if (rem >= min_chunk_size(h)) {
-                chunkid_t c2 = c + sz;
-                chunkid_t c3 = right_chunk(h, c);
+        /* Merge with free right chunk? */
+        if (!chunk_used(h, right_chunk(h, c))) {
+                int bi = bucket_idx(h, chunk_size(h, right_chunk(h, c)));
 
-                set_chunk_size(h, c, sz);
-                set_chunk_size(h, c2, rem);
-                set_left_chunk_size(h, c2, sz);
-                set_left_chunk_size(h, c3, rem);
-                free_list_add(h, c2);
+                free_list_remove(h, bi, right_chunk(h, c));
+                merge_chunks(h, c, right_chunk(h, c));
         }
 
-        set_chunk_used(h, c, true);
+        /* Merge with free left chunk? */
+        if (!chunk_used(h, left_chunk(h, c))) {
+                int bi = bucket_idx(h, chunk_size(h, left_chunk(h, c)));
+
+                free_list_remove(h, bi, left_chunk(h, c));
+                merge_chunks(h, left_chunk(h, c), c);
+                c = left_chunk(h, c);
+        }
 
-        return chunk_mem(h, c);
+        free_list_add(h, c);
 }
 
 void sys_heap_free(struct sys_heap *heap, void *mem)
 {
         if (mem == NULL) {
                 return; /* ISO C free() semantics */
         }
-
         struct z_heap *h = heap->heap;
         chunkid_t c = ((uint8_t *)mem - chunk_header_bytes(h)
                        - (uint8_t *)chunk_buf(h)) / CHUNK_UNIT;
@@ -115,51 +158,26 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
          */
         __ASSERT(chunk_used(h, c),
                  "unexpected heap state (double-free?) for memory at %p", mem);
+
         /*
          * It is easy to catch many common memory overflow cases with
          * a quick check on this and next chunk header fields that are
          * immediately before and after the freed memory.
          */
         __ASSERT(left_chunk(h, right_chunk(h, c)) == c,
-                 "corrupted heap bounds (buffer overflow?) for memory at %p", mem);
-
-        /* Merge with right chunk? We can just absorb it. */
-        if (!chunk_used(h, right_chunk(h, c))) {
-                chunkid_t rc = right_chunk(h, c);
-                size_t newsz = chunk_size(h, c) + chunk_size(h, rc);
-
-                free_list_remove(h, bucket_idx(h, chunk_size(h, rc)), rc);
-                set_chunk_size(h, c, newsz);
-                set_left_chunk_size(h, right_chunk(h, c), newsz);
-        }
-
-        /* Merge with left chunk? It absorbs us. */
-        if (!chunk_used(h, left_chunk(h, c))) {
-                chunkid_t lc = left_chunk(h, c);
-                chunkid_t rc = right_chunk(h, c);
-                size_t csz = chunk_size(h, c);
-                size_t merged_sz = csz + chunk_size(h, lc);
-
-                free_list_remove(h, bucket_idx(h, chunk_size(h, lc)), lc);
-                set_chunk_size(h, lc, merged_sz);
-                set_left_chunk_size(h, rc, merged_sz);
-
-                c = lc;
-        }
+                 "corrupted heap bounds (buffer overflow?) for memory at %p",
+                 mem);
 
-        set_chunk_used(h, c, false);
-        free_list_add(h, c);
+        free_chunks(h, c);
 }
 
-void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
+static chunkid_t alloc_chunks(struct z_heap *h, size_t sz)
 {
-        struct z_heap *h = heap->heap;
-        size_t sz = bytes_to_chunksz(h, bytes);
         int bi = bucket_idx(h, sz);
         struct z_heap_bucket *b = &h->buckets[bi];
 
-        if (bytes == 0 || bi > bucket_idx(h, h->len)) {
-                return NULL;
+        if (bi > bucket_idx(h, h->len)) {
+                return 0;
         }
 
         /* First try a bounded count of items from the minimal bucket
@@ -199,7 +217,18 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
                 return split_alloc(h, minbucket, sz);
         }
 
-        return NULL;
+        return 0;
+}
+
+void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
+{
+        if (bytes == 0) {
+                return NULL;
+        }
+        size_t chunksz = bytes_to_chunksz(heap->heap, bytes);
+        chunkid_t c = alloc_chunks(heap->heap, chunksz);
+
+        return chunk_mem(heap->heap, c);
 }
 
 void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
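
For context, here is a minimal usage sketch of the public sys_heap API whose internals this commit reworks. The header path, buffer size, and function name are illustrative assumptions, not part of the commit:

/* Hypothetical usage sketch (not from the commit).  Assumes the sys_heap
 * API declared in <sys/sys_heap.h> and a statically reserved backing buffer.
 */
#include <sys/sys_heap.h>

static char heap_storage[2048];
static struct sys_heap my_heap;

void heap_demo(void)
{
        /* Give the heap its backing memory once at startup */
        sys_heap_init(&my_heap, heap_storage, sizeof(heap_storage));

        void *p = sys_heap_alloc(&my_heap, 100);
        if (p != NULL) {
                /* ...use the allocation... */
                sys_heap_free(&my_heap, p);
        }

        sys_heap_free(&my_heap, NULL); /* ISO C semantics: freeing NULL is a no-op */
}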
