|
| 1 | +/* |
| 2 | + * Copyright (c) 2022, Meta |
| 3 | + * |
| 4 | + * SPDX-License-Identifier: Apache-2.0 |
| 5 | + */ |
| 6 | + |
| 7 | +#include "kernel_internal.h" |
| 8 | + |
| 9 | +#include <zephyr/kernel.h> |
| 10 | +#include <zephyr/kernel/thread_stack.h> |
| 11 | +#include <zephyr/logging/log.h> |
| 12 | +#include <zephyr/sys/bitarray.h> |
| 13 | + |
| 14 | +LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); |
| 15 | + |
#if CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0
#define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE
#else
/* sys_bitarray requires a non-zero size; reserve a single (unused) bit */
#define BA_SIZE 1
#endif

/* Search context passed to k_thread_foreach() via dyn_cb() */
struct dyn_cb_data {
	k_tid_t tid;             /* out: thread using .stack, or NULL if none */
	k_thread_stack_t *stack; /* in: stack buffer being searched for */
};

/* Statically preallocated pool of dynamic thread stacks */
static K_THREAD_STACK_ARRAY_DEFINE(dynamic_stack, CONFIG_DYNAMIC_THREAD_POOL_SIZE,
				   CONFIG_DYNAMIC_THREAD_STACK_SIZE);
/* One bit per pool slot: set = slot allocated, clear = slot free */
SYS_BITARRAY_DEFINE_STATIC(dynamic_ba, BA_SIZE);
| 30 | + |
| 31 | +static k_thread_stack_t *z_thread_stack_alloc_dyn(size_t align, size_t size) |
| 32 | +{ |
| 33 | + return z_thread_aligned_alloc(align, size); |
| 34 | +} |
| 35 | + |
| 36 | +static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size) |
| 37 | +{ |
| 38 | + int rv; |
| 39 | + size_t offset; |
| 40 | + k_thread_stack_t *stack; |
| 41 | + |
| 42 | + if (size > CONFIG_DYNAMIC_THREAD_STACK_SIZE) { |
| 43 | + LOG_DBG("stack size %zu is > pool stack size %d", size, |
| 44 | + CONFIG_DYNAMIC_THREAD_STACK_SIZE); |
| 45 | + return NULL; |
| 46 | + } |
| 47 | + |
| 48 | + rv = sys_bitarray_alloc(&dynamic_ba, 1, &offset); |
| 49 | + if (rv < 0) { |
| 50 | + LOG_DBG("unable to allocate stack from pool"); |
| 51 | + return NULL; |
| 52 | + } |
| 53 | + |
| 54 | + __ASSERT_NO_MSG(offset < CONFIG_DYNAMIC_THREAD_POOL_SIZE); |
| 55 | + |
| 56 | + stack = (k_thread_stack_t *)&dynamic_stack[offset]; |
| 57 | + |
| 58 | + return stack; |
| 59 | +} |
| 60 | + |
| 61 | +k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags) |
| 62 | +{ |
| 63 | + size_t align = 0; |
| 64 | + size_t obj_size = 0; |
| 65 | + k_thread_stack_t *stack = NULL; |
| 66 | + |
| 67 | +#ifdef CONFIG_USERSPACE |
| 68 | + if ((flags & K_USER) != 0) { |
| 69 | + align = Z_THREAD_STACK_OBJ_ALIGN(size); |
| 70 | + obj_size = Z_THREAD_STACK_SIZE_ADJUST(size); |
| 71 | + } else |
| 72 | +#endif |
| 73 | + { |
| 74 | + align = Z_KERNEL_STACK_OBJ_ALIGN; |
| 75 | + obj_size = Z_KERNEL_STACK_SIZE_ADJUST(size); |
| 76 | + } |
| 77 | + |
| 78 | + if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) { |
| 79 | + stack = z_thread_stack_alloc_dyn(align, obj_size); |
| 80 | + if (stack == NULL && CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { |
| 81 | + stack = z_thread_stack_alloc_pool(size); |
| 82 | + } |
| 83 | + } else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL) && |
| 84 | + CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { |
| 85 | + stack = z_thread_stack_alloc_pool(size); |
| 86 | + if (stack == NULL && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) { |
| 87 | + stack = z_thread_stack_alloc_dyn(align, obj_size); |
| 88 | + } |
| 89 | + } else { |
| 90 | + return NULL; |
| 91 | + } |
| 92 | + |
| 93 | + return stack; |
| 94 | +} |
| 95 | + |
#ifdef CONFIG_USERSPACE
/* Syscall verification handler: no argument validation needed here, the
 * size/flags are validated by the implementation itself.
 */
static inline k_thread_stack_t *z_vrfy_k_thread_stack_alloc(size_t size, int flags)
{
	return z_impl_k_thread_stack_alloc(size, flags);
}
#include <syscalls/k_thread_stack_alloc_mrsh.c>
#endif
| 103 | + |
| 104 | +static void dyn_cb(const struct k_thread *thread, void *user_data) |
| 105 | +{ |
| 106 | + struct dyn_cb_data *const data = (struct dyn_cb_data *)user_data; |
| 107 | + |
| 108 | + if (data->stack == (k_thread_stack_t *)thread->stack_info.start) { |
| 109 | + __ASSERT(data->tid == NULL, "stack %p is associated with more than one thread!"); |
| 110 | + data->tid = (k_tid_t)thread; |
| 111 | + } |
| 112 | +} |
| 113 | + |
| 114 | +int z_impl_k_thread_stack_free(k_thread_stack_t *stack) |
| 115 | +{ |
| 116 | + char state_buf[16] = {0}; |
| 117 | + struct dyn_cb_data data = {.stack = stack}; |
| 118 | + |
| 119 | + /* Get a possible tid associated with stack */ |
| 120 | + k_thread_foreach(dyn_cb, &data); |
| 121 | + |
| 122 | + if (data.tid != NULL) { |
| 123 | + /* Check if thread is in use */ |
| 124 | + if (k_thread_state_str(data.tid, state_buf, sizeof(state_buf)) != state_buf) { |
| 125 | + LOG_ERR("tid %p is invalid!", data.tid); |
| 126 | + return -EINVAL; |
| 127 | + } |
| 128 | + |
| 129 | + if (!(strcmp("dummy", state_buf) == 0) || (strcmp("dead", state_buf) == 0)) { |
| 130 | + LOG_ERR("tid %p is in use!", data.tid); |
| 131 | + return -EBUSY; |
| 132 | + } |
| 133 | + } |
| 134 | + |
| 135 | + if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { |
| 136 | + if (IS_ARRAY_ELEMENT(dynamic_stack, stack)) { |
| 137 | + if (sys_bitarray_free(&dynamic_ba, 1, ARRAY_INDEX(dynamic_stack, stack))) { |
| 138 | + LOG_ERR("stack %p is not allocated!", stack); |
| 139 | + return -EINVAL; |
| 140 | + } |
| 141 | + |
| 142 | + return 0; |
| 143 | + } |
| 144 | + } |
| 145 | + |
| 146 | + if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) { |
| 147 | + k_free(stack); |
| 148 | + } else { |
| 149 | + LOG_ERR("Invalid stack %p", stack); |
| 150 | + return -EINVAL; |
| 151 | + } |
| 152 | + |
| 153 | + return 0; |
| 154 | +} |
| 155 | + |
#ifdef CONFIG_USERSPACE
/* Syscall verification handler: stack validity is checked by the
 * implementation (thread scan + pool/heap membership), so no extra
 * verification is performed here.
 */
static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack)
{
	return z_impl_k_thread_stack_free(stack);
}
#include <syscalls/k_thread_stack_free_mrsh.c>
#endif
0 commit comments