| 1 | +/* |
| 2 | + * Copyright (c) 2012, 2013 ARM Ltd |
| 3 | + * All rights reserved. |
| 4 | + * |
| 5 | + * Redistribution and use in source and binary forms, with or without |
| 6 | + * modification, are permitted provided that the following conditions |
| 7 | + * are met: |
| 8 | + * 1. Redistributions of source code must retain the above copyright |
| 9 | + * notice, this list of conditions and the following disclaimer. |
| 10 | + * 2. Redistributions in binary form must reproduce the above copyright |
| 11 | + * notice, this list of conditions and the following disclaimer in the |
| 12 | + * documentation and/or other materials provided with the distribution. |
| 13 | + * 3. The name of the company may not be used to endorse or promote |
| 14 | + * products derived from this software without specific prior written |
| 15 | + * permission. |
| 16 | + * |
| 17 | + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED |
| 18 | + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
| 19 | + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| 20 | + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 | + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED |
| 22 | + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
| 23 | + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
| 24 | + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| 25 | + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 26 | + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | + */ |
| 28 | + |
| 29 | +/** |
| 30 | + * NOTE: This file is based on newlib/libc/stdlib/nano-mallocr.c |
| 31 | + * from git://sourceware.org/git/newlib-cygwin.git. It contains a |
| 32 | + * workaround for a bug in newlib 4.1.0: |
| 33 | + * |
| 34 | + * The commit 84d0689 "Nano-malloc: Fix for unwanted external heap |
| 35 | + * fragmentation" from newlib 4.1.0 introduced several optimizations, |
| 36 | + * one of which is as follows: |
| 37 | + * |
| 38 | + * When the last chunk in the free list is smaller than requested, |
| 39 | + * nano_malloc() calls sbrk(0) to see if the heap's current head is |
| 40 | + * adjacent to this chunk, and if so it asks sbrk() for the difference |
| 41 | + * in bytes only and expands the current chunk. |
| 42 | + * |
| 43 | + * This doesn't work if the heap consists of non-contiguous regions. |
| 44 | + * sbrk(0) returns the current region's head if the latter has any |
| 45 | + * remaining capacity. But if this capacity is not enough for the second |
| 46 | + * (non-trivial) call to sbrk() described above, allocation will happen |
| 47 | + * at the next region if available. Expanding the current chunk won't |
| 48 | + * work and will result in a segmentation fault. |
| 49 | + * |
| 50 | + * So this optimization needs to be reverted in order to restore |
| 51 | + * compatibility with non-contiguous heaps. Until the next version |
| 52 | + * of newlib becomes available and is picked up by the GCC Arm Embedded |
| 53 | + * Toolchains, we work around this issue by including the fix in Mbed OS. |
| 54 | + * The linker prioritizes malloc() from the project over the one from the |
| 55 | + * toolchain. |
| 56 | + */ |
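| 56a | + |
| 56b | +/* |
| 56c | + * As an illustration of the scenario above (a sketch, assuming a heap made |
| 56d | + * of two non-contiguous regions R1 and R2, where R1 is nearly exhausted): |
| 56e | + * |
| 56f | + * 1. The last chunk in the free list ends exactly at the current break |
| 56g | + *    in R1, but is smaller than the requested size. |
| 56h | + * 2. The optimized nano_malloc() calls sbrk(0), sees that the chunk is |
| 56i | + *    adjacent to the break, and requests only the missing difference. |
| 56j | + * 3. R1 cannot provide that difference, so sbrk() hands out memory from |
| 56k | + *    the start of R2 instead. |
| 56l | + * 4. nano_malloc() nevertheless "expands" the R1 chunk across the gap |
| 56m | + *    between the regions, and later accesses fault. |
| 56n | + */ |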
| 57 | + |
| 58 | +#if defined(__NEWLIB_NANO) |
| 59 | + |
| 60 | +#include <newlib.h> |
| 61 | + |
| 62 | +#if (__NEWLIB__ == 4) && (__NEWLIB_MINOR__ == 1) && (__NEWLIB_PATCHLEVEL__ == 0) |
| 63 | + |
| 64 | +/* Implementation of <<malloc>> <<free>> <<calloc>> <<realloc>>, optionally |
| 65 | + * reentrant. |
| 66 | + * |
| 67 | + * For interface documentation, refer to malloc.c. |
| 68 | + */ |
| 69 | + |
| 70 | +#include <stdio.h> |
| 71 | +#include <string.h> |
| 72 | +#include <errno.h> |
| 73 | +#include <malloc.h> |
| 74 | + |
| 75 | +#if DEBUG |
| 76 | +#include <assert.h> |
| 77 | +#else |
| 78 | +#define assert(x) ((void)0) |
| 79 | +#endif |
| 80 | + |
| 81 | +#ifndef MAX |
| 82 | +#define MAX(a,b) ((a) >= (b) ? (a) : (b)) |
| 83 | +#endif |
| 84 | + |
| 85 | +#define _SBRK_R(X) _sbrk_r(X) |
| 86 | + |
| 87 | +#include <sys/config.h> |
| 88 | +#include <reent.h> |
| 89 | + |
| 90 | +#define RARG struct _reent *reent_ptr, |
| 91 | +#define RCALL reent_ptr, |
| 92 | + |
| 93 | +#define MALLOC_LOCK __malloc_lock(reent_ptr) |
| 94 | +#define MALLOC_UNLOCK __malloc_unlock(reent_ptr) |
| 95 | + |
| 96 | +#define RERRNO reent_ptr->_errno |
| 97 | + |
| 98 | +#define nano_malloc _malloc_r |
| 99 | + |
| 100 | +/* Redefine names to avoid conflict with user names */ |
| 101 | +#define free_list __malloc_free_list |
| 102 | +#define sbrk_start __malloc_sbrk_start |
| 103 | + |
| 104 | +#define ALIGN_PTR(ptr, align) \ |
| 105 | + (((ptr) + (align) - (intptr_t)1) & ~((align) - (intptr_t)1)) |
| 106 | +#define ALIGN_SIZE(size, align) \ |
| 107 | + (((size) + (align) - (size_t)1) & ~((align) - (size_t)1)) |
| 108 | + |
| 109 | +/* Alignment of allocated block */ |
| 110 | +#define MALLOC_ALIGN (8U) |
| 111 | +#define CHUNK_ALIGN (sizeof(void*)) |
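| 111a | +/* Worst-case number of extra bytes needed to raise a CHUNK_ALIGN-aligned |
| 111b | + * payload pointer up to MALLOC_ALIGN alignment */ |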
| 112 | +#define MALLOC_PADDING ((MAX(MALLOC_ALIGN, CHUNK_ALIGN)) - CHUNK_ALIGN) |
| 113 | + |
| 114 | +/* Minimum allocation size: must be large enough |
| 115 | + * to hold a free-list pointer */ |
| 116 | +#define MALLOC_MINSIZE (sizeof(void *)) |
| 117 | +#define MALLOC_PAGE_ALIGN (0x1000) |
| 118 | +#define MAX_ALLOC_SIZE (0x80000000U) |
| 119 | + |
| 120 | +typedef size_t malloc_size_t; |
| 121 | + |
| 122 | +typedef struct malloc_chunk |
| 123 | +{ |
| 124 | + /* -------------------------------------- |
| 125 | + * chunk->| size | |
| 126 | + * -------------------------------------- |
| 127 | + * | Padding for alignment | |
| 128 | + * | This includes padding inserted by | |
| 129 | + * | the compiler (to align fields) and | |
| 130 | + * | explicit padding inserted by this | |
| 131 | + * | implementation. If any explicit | |
| 132 | + * | padding is being used then the | |
| 133 | + * | sizeof (size) bytes at | |
| 134 | + * | mem_ptr - CHUNK_OFFSET must be | |
| 135 | + * | initialized with the negative | |
| 136 | + * | offset to size. | |
| 137 | + * -------------------------------------- |
| 138 | + * mem_ptr->| When allocated: data | |
| 139 | + * | When freed: pointer to next free | |
| 140 | + * | chunk | |
| 141 | + * -------------------------------------- |
| 142 | + */ |
| 143 | + /* Size of the whole chunk: the size field itself, any padding, |
| 144 | + and the allocated payload area */ |
| 145 | + long size; |
| 146 | + |
| 147 | + /* From here on, the memory is either the pointer to the next free chunk, or the data payload */ |
| 148 | + struct malloc_chunk * next; |
| 149 | +} chunk; |
| 150 | + |
| 151 | + |
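| 151a | +/* Offset of the 'next' field within a chunk, i.e. the number of bytes |
| 151b | + * occupied by the chunk header; equivalent to |
| 151c | + * offsetof(struct malloc_chunk, next) */ |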
| 152 | +#define CHUNK_OFFSET ((malloc_size_t)(&(((struct malloc_chunk *)0)->next))) |
| 153 | + |
| 154 | +/* Size of the smallest possible chunk. A memory piece smaller than this |
| 155 | + * size cannot form a chunk */ |
| 156 | +#define MALLOC_MINCHUNK (CHUNK_OFFSET + MALLOC_PADDING + MALLOC_MINSIZE) |
| 157 | + |
| 158 | +/* List head of free blocks */ |
| 159 | +chunk * free_list = NULL; |
| 160 | + |
| 161 | +/* Starting point of memory allocated from system */ |
| 162 | +char * sbrk_start = NULL; |
| 163 | + |
| 164 | +/** Function sbrk_aligned |
| 165 | + * Algorithm: |
| 166 | + * Use sbrk() to obtain more memory and ensure it is CHUNK_ALIGN aligned |
| 167 | + * Optimise for the case that it is already aligned - only ask for extra |
| 168 | + * padding after we know we need it |
| 169 | + */ |
| 170 | +static void* sbrk_aligned(RARG malloc_size_t s) |
| 171 | +{ |
| 172 | + char *p, *align_p; |
| 173 | + |
| 174 | + if (sbrk_start == NULL) sbrk_start = _SBRK_R(RCALL 0); |
| 175 | + |
| 176 | + p = _SBRK_R(RCALL s); |
| 177 | + |
| 178 | + /* sbrk returns -1 if it fails to allocate */ |
| 179 | + if (p == (void *)-1) |
| 180 | + return p; |
| 181 | + |
| 182 | + align_p = (char*)ALIGN_PTR((uintptr_t)p, CHUNK_ALIGN); |
| 183 | + if (align_p != p) |
| 184 | + { |
| 185 | + /* p is not aligned, ask for a few more bytes so that we have s |
| 186 | + * bytes reserved from align_p. */ |
| 187 | + p = _SBRK_R(RCALL align_p - p); |
| 188 | + if (p == (void *)-1) |
| 189 | + return p; |
| 190 | + } |
| 191 | + return align_p; |
| 192 | +} |
| 193 | + |
| 194 | +/** Function nano_malloc |
| 195 | + * Algorithm: |
| 196 | + * Walk through the free list to find the first fit. If none is found, |
| 197 | + * call sbrk to allocate a new chunk. |
| 198 | + */ |
| 199 | +void * nano_malloc(RARG malloc_size_t s) |
| 200 | +{ |
| 201 | + chunk *p, *r; |
| 202 | + char * ptr, * align_ptr; |
| 203 | + int offset; |
| 204 | + |
| 205 | + malloc_size_t alloc_size; |
| 206 | + |
| 207 | + alloc_size = ALIGN_SIZE(s, CHUNK_ALIGN); /* size of aligned data load */ |
| 208 | + alloc_size += MALLOC_PADDING; /* padding */ |
| 209 | + alloc_size += CHUNK_OFFSET; /* size of chunk head */ |
| 210 | + alloc_size = MAX(alloc_size, MALLOC_MINCHUNK); |
| 211 | + |
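| 211a | + /* Reject requests whose padded size exceeds the supported maximum or |
| 211b | + * wrapped around in the additions above (alloc_size < s) */ |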
| 212 | + if (alloc_size >= MAX_ALLOC_SIZE || alloc_size < s) |
| 213 | + { |
| 214 | + RERRNO = ENOMEM; |
| 215 | + return NULL; |
| 216 | + } |
| 217 | + |
| 218 | + MALLOC_LOCK; |
| 219 | + |
| 220 | + p = free_list; |
| 221 | + r = p; |
| 222 | + |
| 223 | + while (r) |
| 224 | + { |
| 225 | + int rem = r->size - alloc_size; |
| 226 | + if (rem >= 0) |
| 227 | + { |
| 228 | + if (rem >= MALLOC_MINCHUNK) |
| 229 | + { |
| 230 | + if (p == r) |
| 231 | + { |
| 232 | + /* First item in the list, break it into two chunks |
| 233 | + * and return the first one */ |
| 234 | + r->size = alloc_size; |
| 235 | + free_list = (chunk *)((char *)r + alloc_size); |
| 236 | + free_list->size = rem; |
| 237 | + free_list->next = r->next; |
| 238 | + } else { |
| 239 | + /* Any other item in the list. Split and return |
| 240 | + * the first one */ |
| 241 | + r->size = alloc_size; |
| 242 | + p->next = (chunk *)((char *)r + alloc_size); |
| 243 | + p->next->size = rem; |
| 244 | + p->next->next = r->next; |
| 245 | + } |
| 246 | + } |
| 247 | + /* Found a chunk that is exactly the requested size or only slightly |
| 248 | + * bigger; just return this chunk */ |
| 249 | + else if (p == r) |
| 250 | + { |
| 251 | + /* Here it follows that p == r == free_list. Move free_list |
| 252 | + * to the next chunk */ |
| 253 | + free_list = r->next; |
| 254 | + } |
| 255 | + else |
| 256 | + { |
| 257 | + /* Normal case. Remove it from free_list */ |
| 258 | + p->next = r->next; |
| 259 | + } |
| 260 | + break; |
| 261 | + } |
| 262 | + p = r; |
| 263 | + r = r->next; |
| 264 | + } |
| 265 | + |
| 266 | + /* Failed to find an appropriate chunk. Ask for more memory */ |
| 267 | + if (r == NULL) |
| 268 | + { |
| 269 | + r = sbrk_aligned(RCALL alloc_size); |
| 270 | + |
| 271 | + /* sbrk returns -1 if it fails to allocate */ |
| 272 | + if (r == (void *)-1) |
| 273 | + { |
| 274 | + RERRNO = ENOMEM; |
| 275 | + MALLOC_UNLOCK; |
| 276 | + return NULL; |
| 277 | + } |
| 278 | + else |
| 279 | + { |
| 280 | + r->size = alloc_size; |
| 281 | + } |
| 282 | + } |
| 283 | + MALLOC_UNLOCK; |
| 284 | + |
| 285 | + ptr = (char *)r + CHUNK_OFFSET; |
| 286 | + |
| 287 | + align_ptr = (char *)ALIGN_PTR((uintptr_t)ptr, MALLOC_ALIGN); |
| 288 | + offset = align_ptr - ptr; |
| 289 | + |
| 290 | + if (offset) |
| 291 | + { |
| 292 | + /* Initialize sizeof (malloc_chunk.size) bytes at |
| 293 | + align_ptr - CHUNK_OFFSET with negative offset to the |
| 294 | + size field (at the start of the chunk). |
| 295 | + |
| 296 | + The negative offset to size from align_ptr - CHUNK_OFFSET is |
| 297 | + the size of any remaining padding minus CHUNK_OFFSET. This is |
| 298 | + equivalent to the total size of the padding, because the size of |
| 299 | + any remaining padding is the total size of the padding minus |
| 300 | + CHUNK_OFFSET. |
| 301 | + |
| 302 | + Note that the size of the padding must be at least CHUNK_OFFSET. |
| 303 | + |
| 304 | + The rest of the padding is not initialized. */ |
| 305 | + *(long *)((char *)r + offset) = -offset; |
| 306 | + } |
| 307 | + |
| 307 | + assert(align_ptr + s <= (char *)r + alloc_size); |
| 309 | + return align_ptr; |
| 310 | +} |
| 311 | + |
| 312 | +#endif // (__NEWLIB__ == 4) && (__NEWLIB_MINOR__ == 1) && (__NEWLIB_PATCHLEVEL__ == 0) |
| 313 | + |
| 314 | +#endif // defined(__NEWLIB_NANO) |