/*
 * Copyright (c) 2022, Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>

#define TIMEOUT_MS 500

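/*
 * A stack object is larger than the nominal stack size: each architecture
 * may reserve extra space (e.g. for guard regions), and userspace thread
 * stacks are adjusted differently than kernel-only stacks.
 */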
#ifdef CONFIG_USERSPACE
#define STACK_OBJ_SIZE Z_THREAD_STACK_SIZE_ADJUST(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#else
#define STACK_OBJ_SIZE Z_KERNEL_STACK_SIZE_ADJUST(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif

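/* the number of thread stacks that should fit in the k_malloc() heap */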
#define MAX_HEAP_STACKS (CONFIG_HEAP_MEM_POOL_SIZE / STACK_OBJ_SIZE)

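/*
 * ZTEST_DMEM places the flags in the ztest memory partition, so the
 * K_USER threads spawned by the tests are able to write to them.
 */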
ZTEST_DMEM bool flag[CONFIG_DYNAMIC_THREAD_POOL_SIZE];

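/** @brief Thread entry point; sets the flag passed in @a arg1 */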
static void func(void *arg1, void *arg2, void *arg3)
{
	bool *flag = (bool *)arg1;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	printk("Hello, dynamic world!\n");

	*flag = true;
}

/** @brief Exercise the pool-based thread stack allocator */
ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_pool)
{
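	/* static storage keeps these arrays out of the test thread's stack frame */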
	static k_tid_t tid[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
	static struct k_thread th[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
	static k_thread_stack_t *stack[CONFIG_DYNAMIC_THREAD_POOL_SIZE];

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) {
		ztest_test_skip();
	}

	/* allocate all thread stacks from the pool */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		stack[i] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);

		zassert_not_null(stack[i]);
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
		/* ensure one more thread stack can be allocated from the heap once the pool is depleted */
		zassert_ok(k_thread_stack_free(
			k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
					     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0)));
	} else {
		/* ensure that no more thread stacks can be allocated from the pool */
		zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0));
	}

	/* spawn our threads */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		tid[i] = k_thread_create(&th[i], stack[i],
					 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
					 &flag[i], NULL, NULL, 0,
					 K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	}

	/* join all threads and check that flags have been set */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		zassert_ok(k_thread_join(tid[i], K_MSEC(TIMEOUT_MS)));
		zassert_true(flag[i]);
	}

	/* clean up stacks allocated from the pool */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		zassert_ok(k_thread_stack_free(stack[i]));
	}
}

/** @brief Exercise the heap-based thread stack allocator */
ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
{
	size_t N;
	static k_tid_t tid[MAX_HEAP_STACKS];
	/* ZTEST_DMEM: the flags must be writable by the K_USER threads spawned below */
	static ZTEST_DMEM bool flag[MAX_HEAP_STACKS];
	static struct k_thread th[MAX_HEAP_STACKS];
	static k_thread_stack_t *stack[MAX_HEAP_STACKS];

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
		ztest_test_skip();
	}

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
		ztest_test_skip();
	}

	/* allocate all thread stacks from the heap */
	for (N = 0; N < MAX_HEAP_STACKS; ++N) {
		stack[N] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);
		zassert_not_null(stack[N]);
	}

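	/*
	 * CONFIG_DYNAMIC_THREAD_PREFER_ALLOC falls back to the pool, so a
	 * full heap only guarantees a failed allocation when no pool is
	 * configured.
	 */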
	if (CONFIG_DYNAMIC_THREAD_POOL_SIZE == 0) {
		/* ensure that no more thread stacks can be allocated from the heap */
		zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0));
	}

	/* spawn our threads */
	for (size_t i = 0; i < N; ++i) {
		tid[i] = k_thread_create(&th[i], stack[i],
					 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
					 &flag[i], NULL, NULL, 0,
					 K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	}

	/* join all threads and check that flags have been set */
	for (size_t i = 0; i < N; ++i) {
		zassert_ok(k_thread_join(tid[i], K_MSEC(TIMEOUT_MS)));
		zassert_true(flag[i]);
	}

	/* clean up stacks allocated from the heap */
	for (size_t i = 0; i < N; ++i) {
		zassert_ok(k_thread_stack_free(stack[i]));
	}
}

static void *dynamic_thread_stack_setup(void)
{
#ifdef CONFIG_USERSPACE
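	/*
	 * Use the system heap as this thread's resource pool, so that
	 * allocations made on the thread's behalf can succeed.
	 */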
	k_thread_system_pool_assign(k_current_get());
	/* k_thread_access_grant(k_current_get(), ... ); */
#endif

	return NULL;
}

ZTEST_SUITE(dynamic_thread_stack, NULL, dynamic_thread_stack_setup, NULL, NULL, NULL);