diff --git a/include/zephyr/kernel.h b/include/zephyr/kernel.h index d3a435a4d38a..9a88baeb5bdf 100644 --- a/include/zephyr/kernel.h +++ b/include/zephyr/kernel.h @@ -265,6 +265,35 @@ extern void k_thread_foreach_unlocked( /* end - thread options */ #if !defined(_ASMLANGUAGE) +/** + * @brief Dynamically allocate a thread stack. + * + * Relevant stack creation flags include: + * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`) + * + * @param size Stack size in bytes. + * @param flags Stack creation flags, or 0. + * + * @retval the allocated thread stack on success. + * @retval NULL on failure. + * + * @see CONFIG_DYNAMIC_THREAD + */ +__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags); + +/** + * @brief Free a dynamically allocated thread stack. + * + * @param stack Pointer to the thread stack. + * + * @retval 0 on success. + * @retval -EBUSY if the thread stack is in use. + * @retval -EINVAL if @p stack is invalid. + * + * @see CONFIG_DYNAMIC_THREAD + */ +__syscall int k_thread_stack_free(k_thread_stack_t *stack); + /** * @brief Create a thread. * diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index 7b4781e4fd33..82427e83ba88 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -122,6 +122,12 @@ target_sources_ifdef( userspace.c ) +target_sources_ifdef( + CONFIG_DYNAMIC_THREAD + kernel PRIVATE + dynamic.c + ) + target_include_directories(kernel PRIVATE ${ZEPHYR_BASE}/kernel/include ${ARCH_DIR}/${ARCH}/include diff --git a/kernel/Kconfig b/kernel/Kconfig index e553553b1c74..12cd7b8f836b 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -203,6 +203,68 @@ config THREAD_USERSPACE_LOCAL_DATA depends on USERSPACE default y if ERRNO && !ERRNO_IN_TLS +config DYNAMIC_THREAD + bool "Support for dynamic threads [EXPERIMENTAL]" + select EXPERIMENTAL + depends on THREAD_STACK_INFO + select DYNAMIC_OBJECTS if USERSPACE + help + Enable support for dynamic threads and stacks. 
+ +if DYNAMIC_THREAD + +config DYNAMIC_THREAD_STACK_SIZE + int "Size of each pre-allocated thread stack" + default 1024 if !64BIT + default 2048 if 64BIT + help + Default stack size (in bytes) for dynamic threads. + +config DYNAMIC_THREAD_ALLOC + bool "Support heap-allocated thread objects and stacks" + help + Select this option to enable allocating thread object and + thread stacks from the system heap. + + Only use this type of allocation in situations + where malloc is permitted. + +config DYNAMIC_THREAD_POOL_SIZE + int "Number of statically pre-allocated threads" + default 0 + range 0 8192 + help + Pre-allocate a fixed number of thread objects and + stacks at build time. + + This type of "dynamic" stack is usually suitable in + situations where malloc is not permitted. + +choice DYNAMIC_THREAD_PREFER + prompt "Preferred dynamic thread allocator" + default DYNAMIC_THREAD_PREFER_POOL + help + If both CONFIG_DYNAMIC_THREAD_ALLOC=y and + CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0, then the user may + specify the order in which allocation is attempted. + +config DYNAMIC_THREAD_PREFER_ALLOC + bool "Prefer heap-based allocation" + depends on DYNAMIC_THREAD_ALLOC + help + Select this option to attempt a heap-based allocation + prior to any pool-based allocation. + +config DYNAMIC_THREAD_PREFER_POOL + bool "Prefer pool-based allocation" + help + Select this option to attempt a pool-based allocation + prior to any heap-based allocation. 
+ +endchoice # DYNAMIC_THREAD_PREFER + +endif # DYNAMIC_THREAD + config LIBC_ERRNO bool help diff --git a/kernel/dynamic.c b/kernel/dynamic.c new file mode 100644 index 000000000000..6f9c1f6adcfe --- /dev/null +++ b/kernel/dynamic.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2022, Meta + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "kernel_internal.h" + +#include <zephyr/kernel.h> +#include <zephyr/logging/log.h> +#include <zephyr/sys/bitarray.h> +#include <zephyr/syscall_handler.h> + +LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); + +#if CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0 +#define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE +#else +#define BA_SIZE 1 +#endif + +struct dyn_cb_data { + k_tid_t tid; + k_thread_stack_t *stack; +}; + +static K_THREAD_STACK_ARRAY_DEFINE(dynamic_stack, CONFIG_DYNAMIC_THREAD_POOL_SIZE, + CONFIG_DYNAMIC_THREAD_STACK_SIZE); +SYS_BITARRAY_DEFINE_STATIC(dynamic_ba, BA_SIZE); + +static k_thread_stack_t *z_thread_stack_alloc_dyn(size_t align, size_t size) +{ + return z_thread_aligned_alloc(align, size); +} + +static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size) +{ + int rv; + size_t offset; + k_thread_stack_t *stack; + + if (size > CONFIG_DYNAMIC_THREAD_STACK_SIZE) { + LOG_DBG("stack size %zu is > pool stack size %d", size, + CONFIG_DYNAMIC_THREAD_STACK_SIZE); + return NULL; + } + + rv = sys_bitarray_alloc(&dynamic_ba, 1, &offset); + if (rv < 0) { + LOG_DBG("unable to allocate stack from pool"); + return NULL; + } + + __ASSERT_NO_MSG(offset < CONFIG_DYNAMIC_THREAD_POOL_SIZE); + + stack = (k_thread_stack_t *)&dynamic_stack[offset]; + + return stack; +} + +k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags) +{ + size_t align = 0; + size_t obj_size = 0; + k_thread_stack_t *stack = NULL; + +#ifdef CONFIG_USERSPACE + if ((flags & K_USER) != 0) { + align = Z_THREAD_STACK_OBJ_ALIGN(size); + obj_size = Z_THREAD_STACK_SIZE_ADJUST(size); + } else +#endif + { + align = Z_KERNEL_STACK_OBJ_ALIGN; + obj_size = Z_KERNEL_STACK_SIZE_ADJUST(size); + } + + if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) { + 
stack = z_thread_stack_alloc_dyn(align, obj_size); + if (stack == NULL && CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { + stack = z_thread_stack_alloc_pool(size); + } + } else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL) && + CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { + stack = z_thread_stack_alloc_pool(size); + if (stack == NULL && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) { + stack = z_thread_stack_alloc_dyn(align, obj_size); + } + } else { + return NULL; + } + + return stack; +} + +#ifdef CONFIG_USERSPACE +static inline k_thread_stack_t *z_vrfy_k_thread_stack_alloc(size_t size, int flags) +{ + return z_impl_k_thread_stack_alloc(size, flags); +} +#include <syscalls/k_thread_stack_alloc_mrsh.c> +#endif + +static void dyn_cb(const struct k_thread *thread, void *user_data) +{ + struct dyn_cb_data *const data = (struct dyn_cb_data *)user_data; + + if (data->stack == (k_thread_stack_t *)thread->stack_info.start) { + __ASSERT(data->tid == NULL, "stack %p is associated with more than one thread!", + data->stack); + data->tid = (k_tid_t)thread; + } +} + +int z_impl_k_thread_stack_free(k_thread_stack_t *stack) +{ + char state_buf[16] = {0}; + struct dyn_cb_data data = {.stack = stack}; + + /* Get a possible tid associated with stack */ + k_thread_foreach(dyn_cb, &data); + + if (data.tid != NULL) { + /* Check if thread is in use */ + if (k_thread_state_str(data.tid, state_buf, sizeof(state_buf)) != state_buf) { + LOG_ERR("tid %p is invalid!", data.tid); + return -EINVAL; + } + + if (!((strcmp("dummy", state_buf) == 0) || (strcmp("dead", state_buf) == 0))) { + LOG_ERR("tid %p is in use!", data.tid); + return -EBUSY; + } + } + + if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { + if (IS_ARRAY_ELEMENT(dynamic_stack, stack)) { + if (sys_bitarray_free(&dynamic_ba, 1, ARRAY_INDEX(dynamic_stack, stack))) { + LOG_ERR("stack %p is not allocated!", stack); + return -EINVAL; + } + + return 0; + } + } + + if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) { + k_free(stack); + } else { + LOG_ERR("Invalid stack %p", stack); + return -EINVAL; + } + + return 0; 
+} + +#ifdef CONFIG_USERSPACE +static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack) +{ + return z_impl_k_thread_stack_free(stack); +} +#include <syscalls/k_thread_stack_free_mrsh.c> +#endif diff --git a/tests/kernel/threads/dynamic_thread_stack/CMakeLists.txt b/tests/kernel/threads/dynamic_thread_stack/CMakeLists.txt new file mode 100644 index 000000000000..871a66a45279 --- /dev/null +++ b/tests/kernel/threads/dynamic_thread_stack/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) +find_package(Zephyr HINTS $ENV{ZEPHYR_BASE}) +project(dynamic_thread_stack) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/kernel/threads/dynamic_thread_stack/prj.conf b/tests/kernel/threads/dynamic_thread_stack/prj.conf new file mode 100644 index 000000000000..4377c2ec60b4 --- /dev/null +++ b/tests/kernel/threads/dynamic_thread_stack/prj.conf @@ -0,0 +1,14 @@ +CONFIG_ZTEST=y +CONFIG_ZTEST_NEW_API=y +CONFIG_INIT_STACKS=y +CONFIG_THREAD_STACK_INFO=y +CONFIG_MAX_THREAD_BYTES=5 +CONFIG_DYNAMIC_THREAD=y +CONFIG_DYNAMIC_THREAD_POOL_SIZE=2 +CONFIG_DYNAMIC_THREAD_ALLOC=y +CONFIG_HEAP_MEM_POOL_SIZE=16384 +CONFIG_ZTEST_STACK_SIZE=2048 +CONFIG_MAIN_STACK_SIZE=2048 + +CONFIG_HW_STACK_PROTECTION=n +CONFIG_TEST_HW_STACK_PROTECTION=n diff --git a/tests/kernel/threads/dynamic_thread_stack/src/main.c b/tests/kernel/threads/dynamic_thread_stack/src/main.c new file mode 100644 index 000000000000..86848e4249ee --- /dev/null +++ b/tests/kernel/threads/dynamic_thread_stack/src/main.c @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2022, Meta + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include <zephyr/kernel.h> +#include <zephyr/ztest.h> + +#define TIMEOUT_MS 500 + +#ifdef CONFIG_USERSPACE +#define STACK_OBJ_SIZE Z_THREAD_STACK_SIZE_ADJUST(CONFIG_DYNAMIC_THREAD_STACK_SIZE) +#else +#define STACK_OBJ_SIZE Z_KERNEL_STACK_SIZE_ADJUST(CONFIG_DYNAMIC_THREAD_STACK_SIZE) +#endif + +#define MAX_HEAP_STACKS (CONFIG_HEAP_MEM_POOL_SIZE / STACK_OBJ_SIZE) + 
+ZTEST_DMEM bool flag[CONFIG_DYNAMIC_THREAD_POOL_SIZE]; + +static void func(void *arg1, void *arg2, void *arg3) +{ + bool *flag = (bool *)arg1; + + ARG_UNUSED(arg2); + ARG_UNUSED(arg3); + + printk("Hello, dynamic world!\n"); + + *flag = true; +} + +/** @brief Exercise the pool-based thread stack allocator */ +ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_pool) +{ + static k_tid_t tid[CONFIG_DYNAMIC_THREAD_POOL_SIZE]; + static struct k_thread th[CONFIG_DYNAMIC_THREAD_POOL_SIZE]; + static k_thread_stack_t *stack[CONFIG_DYNAMIC_THREAD_POOL_SIZE]; + + if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) { + ztest_test_skip(); + } + + /* allocate all thread stacks from the pool */ + for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) { + stack[i] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, + IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0); + + zassert_not_null(stack[i]); + } + + if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) { + /* ensure 1 thread can be allocated from the heap when the pool is depleted */ + zassert_ok(k_thread_stack_free( + k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, + IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0))); + } else { + /* ensure that no more thread stacks can be allocated from the pool */ + zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, + IS_ENABLED(CONFIG_USERSPACE) ? 
K_USER : 0)); + } + + /* spawn our threads */ + for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) { + tid[i] = k_thread_create(&th[i], stack[i], + CONFIG_DYNAMIC_THREAD_STACK_SIZE, func, + &flag[i], NULL, NULL, 0, + K_USER | K_INHERIT_PERMS, K_NO_WAIT); + } + + /* join all threads and check that flags have been set */ + for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) { + zassert_ok(k_thread_join(tid[i], K_MSEC(TIMEOUT_MS))); + zassert_true(flag[i]); + } + + /* clean up stacks allocated from the pool */ + for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) { + zassert_ok(k_thread_stack_free(stack[i])); + } +} + +/** @brief Exercise the heap-based thread stack allocator */ +ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc) +{ + size_t N; + static k_tid_t tid[MAX_HEAP_STACKS]; + static bool flag[MAX_HEAP_STACKS]; + static struct k_thread th[MAX_HEAP_STACKS]; + static k_thread_stack_t *stack[MAX_HEAP_STACKS]; + + if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) { + ztest_test_skip(); + } + + if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) { + ztest_test_skip(); + } + + /* allocate all thread stacks from the heap */ + for (N = 0; N < MAX_HEAP_STACKS; ++N) { + stack[N] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, + IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0); + zassert_not_null(stack[N]); + } + + if (CONFIG_DYNAMIC_THREAD_POOL_SIZE == 0) { + /* ensure that no more thread stacks can be allocated from the heap */ + zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, + IS_ENABLED(CONFIG_USERSPACE) ? 
K_USER : 0)); + } + + /* spawn our threads */ + for (size_t i = 0; i < N; ++i) { + tid[i] = k_thread_create(&th[i], stack[i], CONFIG_DYNAMIC_THREAD_STACK_SIZE, func, + &flag[i], NULL, NULL, 0, + K_USER | K_INHERIT_PERMS, K_NO_WAIT); + } + + /* join all threads and check that flags have been set */ + for (size_t i = 0; i < N; ++i) { + zassert_ok(k_thread_join(tid[i], K_MSEC(TIMEOUT_MS))); + zassert_true(flag[i]); + } + + /* clean up stacks allocated from the heap */ + for (size_t i = 0; i < N; ++i) { + zassert_ok(k_thread_stack_free(stack[i])); + } +} + +static void *dynamic_thread_stack_setup(void) +{ +#ifdef CONFIG_USERSPACE + k_thread_system_pool_assign(k_current_get()); + /* k_thread_access_grant(k_current_get(), ... ); */ +#endif + + return NULL; +} + +ZTEST_SUITE(dynamic_thread_stack, NULL, dynamic_thread_stack_setup, NULL, NULL, NULL); diff --git a/tests/kernel/threads/dynamic_thread_stack/testcase.yaml b/tests/kernel/threads/dynamic_thread_stack/testcase.yaml new file mode 100644 index 000000000000..3bf7d892b579 --- /dev/null +++ b/tests/kernel/threads/dynamic_thread_stack/testcase.yaml @@ -0,0 +1,76 @@ +common: + tags: kernel security + min_ram: 32 + integration_platforms: + - qemu_x86 + - qemu_x86_nommu + - qemu_x86_64 + - qemu_cortex_a53 + - qemu_cortex_a53_smp + - qemu_cortex_m3 + - qemu_riscv32 + - qemu_riscv32e + - qemu_riscv64 + - qemu_riscv64_smp + +# Permutations of (pool | alloc | user) +tests: + kernel.threads.dynamic_thread.stack.no_pool.no_alloc.no_user: + extra_configs: + # 000 + - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0 + - CONFIG_DYNAMIC_THREAD_ALLOC=n + - CONFIG_USERSPACE=n + + # kernel.threads.dynamic_thread.stack.no_pool.no_alloc.user: + # tags: userspace + # extra_configs: + # # 001 + # - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0 + # - CONFIG_DYNAMIC_THREAD_ALLOC=n + # - CONFIG_USERSPACE=y + + kernel.threads.dynamic_thread.stack.no_pool.alloc.no_user: + extra_configs: + # 010 + - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0 + - CONFIG_DYNAMIC_THREAD_ALLOC=y + - CONFIG_USERSPACE=n + + # 
kernel.threads.dynamic_thread.stack.no_pool.alloc.user: + # tags: userspace + # extra_configs: + # # 011 + # - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0 + # - CONFIG_DYNAMIC_THREAD_ALLOC=y + # - CONFIG_USERSPACE=y + + kernel.threads.dynamic_thread.stack.pool.no_alloc.no_user: + extra_configs: + # 100 + - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2 + - CONFIG_DYNAMIC_THREAD_ALLOC=n + - CONFIG_USERSPACE=n + + # kernel.threads.dynamic_thread.stack.pool.no_alloc.user: + # tags: userspace + # extra_configs: + # # 101 + # - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2 + # - CONFIG_DYNAMIC_THREAD_ALLOC=n + # - CONFIG_USERSPACE=y + + kernel.threads.dynamic_thread.stack.pool.alloc.no_user: + extra_configs: + # 110 + - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2 + - CONFIG_DYNAMIC_THREAD_ALLOC=y + - CONFIG_USERSPACE=n + +# kernel.threads.dynamic_thread.stack.pool.alloc.user: +# tags: userspace +# extra_configs: +# # 111 +# - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2 +# - CONFIG_DYNAMIC_THREAD_ALLOC=y +# - CONFIG_USERSPACE=y