diff --git a/include/zephyr/sys/kobject.h b/include/zephyr/sys/kobject.h
index fa6303754282..64db4d781c92 100644
--- a/include/zephyr/sys/kobject.h
+++ b/include/zephyr/sys/kobject.h
@@ -248,7 +248,8 @@ static inline void k_object_access_all_grant(const void *object)
  * state, with the calling thread being granted permission on it. The memory
  * for the object will be allocated out of the calling thread's resource pool.
  *
- * Currently, allocation of thread stacks is not supported.
+ * @note Thread stack objects have to be allocated with k_object_alloc_size()
+ * since stacks may have different sizes.
  *
  * @param otype Requested kernel object type
  * @return A pointer to the allocated kernel object, or NULL if memory wasn't
@@ -256,6 +257,24 @@ static inline void k_object_access_all_grant(const void *object)
  */
 __syscall void *k_object_alloc(enum k_objects otype);
 
+/**
+ * Allocate a kernel object of a designated type and a given size
+ *
+ * This will instantiate at runtime a kernel object of the specified type,
+ * returning a pointer to it. The object will be returned in an uninitialized
+ * state, with the calling thread being granted permission on it. The memory
+ * for the object will be allocated out of the calling thread's resource pool.
+ *
+ * This function is especially helpful for thread stack objects because
+ * their sizes can vary. Other objects should probably use k_object_alloc().
+ *
+ * @param otype Requested kernel object type
+ * @param size Requested kernel object size
+ * @return A pointer to the allocated kernel object, or NULL if memory wasn't
+ * available
+ */
+__syscall void *k_object_alloc_size(enum k_objects otype, size_t size);
+
 /**
  * Allocate memory and install as a generic kernel object
  *
@@ -322,6 +341,15 @@ static inline void *z_impl_k_object_alloc(enum k_objects otype)
 	return NULL;
 }
 
+static inline void *z_impl_k_object_alloc_size(enum k_objects otype,
+					       size_t size)
+{
+	ARG_UNUSED(otype);
+	ARG_UNUSED(size);
+
+	return NULL;
+}
+
 static inline struct z_object *z_dynamic_object_aligned_create(size_t align,
 							       size_t size)
 {
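A minimal usage sketch for the new syscall (illustrative only: the helper name and the caller-chosen size are hypothetical, and CONFIG_DYNAMIC_OBJECTS is assumed to be enabled):

    #include <zephyr/kernel.h>
    #include <zephyr/sys/kobject.h>

    /* Hypothetical helper: allocate a thread-stack kernel object out of the
     * calling thread's resource pool. Returns NULL if the pool is exhausted
     * or size is 0.
     */
    static k_thread_stack_t *alloc_stack_object(size_t size)
    {
            return k_object_alloc_size(K_OBJ_THREAD_STACK_ELEMENT, size);
    }
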
diff --git a/kernel/dynamic.c b/kernel/dynamic.c
index 6f9c1f6adcfe..3c7a624f644e 100644
--- a/kernel/dynamic.c
+++ b/kernel/dynamic.c
@@ -10,6 +10,8 @@
 #include
 #include
 #include
+#include
+#include
 
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
@@ -39,6 +41,8 @@ static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size)
 	size_t offset;
 	k_thread_stack_t *stack;
 
+	size = Z_KERNEL_STACK_SIZE_ADJUST(size);
+
 	if (size > CONFIG_DYNAMIC_THREAD_STACK_SIZE) {
 		LOG_DBG("stack size %zu is > pool stack size %d", size,
 			CONFIG_DYNAMIC_THREAD_STACK_SIZE);
@@ -58,36 +62,40 @@ static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size)
 	return stack;
 }
 
-k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags)
+static k_thread_stack_t *stack_alloc_dyn(size_t size, int flags)
 {
-	size_t align = 0;
-	size_t obj_size = 0;
-	k_thread_stack_t *stack = NULL;
-
-#ifdef CONFIG_USERSPACE
-	if ((flags & K_USER) != 0) {
-		align = Z_THREAD_STACK_OBJ_ALIGN(size);
-		obj_size = Z_THREAD_STACK_SIZE_ADJUST(size);
-	} else
+	if ((flags & K_USER) == K_USER) {
+#ifdef CONFIG_DYNAMIC_OBJECTS
+		return k_object_alloc_size(K_OBJ_THREAD_STACK_ELEMENT, size);
+#else
+		/* Dynamic user stacks need a kobject, so if this option is
+		 * not enabled we can't proceed.
+		 */
+		return NULL;
 #endif
-	{
-		align = Z_KERNEL_STACK_OBJ_ALIGN;
-		obj_size = Z_KERNEL_STACK_SIZE_ADJUST(size);
 	}
 
+	return z_thread_stack_alloc_dyn(Z_KERNEL_STACK_OBJ_ALIGN,
+					Z_KERNEL_STACK_SIZE_ADJUST(size));
+}
+
+k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags)
+{
+	k_thread_stack_t *stack = NULL;
+
 	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
-		stack = z_thread_stack_alloc_dyn(align, obj_size);
+		stack = stack_alloc_dyn(size, flags);
 		if (stack == NULL && CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
 			stack = z_thread_stack_alloc_pool(size);
 		}
-	} else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL) &&
-		   CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
-		stack = z_thread_stack_alloc_pool(size);
-		if (stack == NULL && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
-			stack = z_thread_stack_alloc_dyn(align, obj_size);
+	} else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) {
+		if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
+			stack = z_thread_stack_alloc_pool(size);
+		}
+
+		if ((stack == NULL) && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
+			stack = stack_alloc_dyn(size, flags);
 		}
-	} else {
-		return NULL;
 	}
 
 	return stack;
@@ -144,7 +152,15 @@ int z_impl_k_thread_stack_free(k_thread_stack_t *stack)
 	}
 
 	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
+#ifdef CONFIG_USERSPACE
+		if (z_object_find(stack)) {
+			k_object_free(stack);
+		} else {
+			k_free(stack);
+		}
+#else
 		k_free(stack);
+#endif
 	} else {
 		LOG_ERR("Invalid stack %p", stack);
 		return -EINVAL;
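The alloc and free paths above are intended to round-trip: stacks obtained with K_USER come from the kobject allocator and are released through k_object_free(), while kernel-only stacks fall back to k_free(). A hedged sketch (the demo() wrapper and the 2048-byte size are invented for illustration):

    #include <zephyr/kernel.h>

    static void demo(void)
    {
            /* With K_USER the stack is backed by a dynamically allocated
             * kernel object (requires CONFIG_DYNAMIC_OBJECTS); without it,
             * the plain kernel-stack heap path is used.
             */
            k_thread_stack_t *stack = k_thread_stack_alloc(2048, K_USER);

            if (stack != NULL) {
                    /* ... create a user thread on it, join it ... */
                    (void)k_thread_stack_free(stack);
            }
    }
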
diff --git a/kernel/userspace.c b/kernel/userspace.c
index 9d8611a3873a..f9a5be98a27e 100644
--- a/kernel/userspace.c
+++ b/kernel/userspace.c
@@ -47,8 +47,27 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
  * not.
  */
 #ifdef CONFIG_DYNAMIC_OBJECTS
-static struct k_spinlock lists_lock;	/* kobj rbtree/dlist */
+static struct k_spinlock lists_lock;	/* kobj dlist */
 static struct k_spinlock objfree_lock;	/* k_object_free */
+
+#ifdef CONFIG_GEN_PRIV_STACKS
+/* On ARM MPU we may have two different alignment requirements
+ * when dynamically allocating thread stacks, one for the privileged
+ * stack and another for the user stack, so we need to account for the
+ * worst-case alignment scenario and reserve space for that.
+ */
+#ifdef CONFIG_ARM_MPU
+#define STACK_ELEMENT_DATA_SIZE(size) \
+	(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
+	Z_THREAD_STACK_OBJ_ALIGN(size) + Z_THREAD_STACK_SIZE_ADJUST(size))
+#else
+#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
+	Z_THREAD_STACK_SIZE_ADJUST(size))
+#endif /* CONFIG_ARM_MPU */
+#else
+#define STACK_ELEMENT_DATA_SIZE(size) Z_THREAD_STACK_SIZE_ADJUST(size)
+#endif /* CONFIG_GEN_PRIV_STACKS */
+
 #endif
 static struct k_spinlock obj_lock;	/* kobj struct data */
@@ -130,32 +149,48 @@ uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
 #define DYN_OBJ_DATA_ALIGN_K_THREAD	(sizeof(void *))
 #endif
 
+#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
+#ifndef CONFIG_MPU_STACK_GUARD
+#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
+	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
+#else
+#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
+	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
+#endif /* !CONFIG_MPU_STACK_GUARD */
+#else
+#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
+	Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
+#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */
+
 #define DYN_OBJ_DATA_ALIGN	\
 	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))
 
-struct dyn_obj {
+struct dyn_obj_base {
 	struct z_object kobj;
-	sys_dnode_t dobj_list;
-	struct rbnode node; /* must be immediately before data member */
+	sys_dnode_t dobj_list; /* must be immediately before data member */
+};
+
+struct dyn_obj {
+	struct dyn_obj_base base;
 
 	/* The object itself */
 	uint8_t data[] __aligned(DYN_OBJ_DATA_ALIGN_K_THREAD);
 };
 
+/* Thread stacks impose very strict alignment requirements. Using this
+ * alignment (generally the page size) for all objects would waste a lot
+ * of memory.
+ */
+struct dyn_obj_stack {
+	struct dyn_obj_base base;
+
+	/* The object itself */
+	void *data;
+};
+
 extern struct z_object *z_object_gperf_find(const void *obj);
 extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
 					    void *context);
 
-static bool node_lessthan(struct rbnode *a, struct rbnode *b);
-
-/*
- * Red/black tree of allocated kernel objects, for reasonably fast lookups
- * based on object pointer values.
- */
-static struct rbtree obj_rb_tree = {
-	.lessthan_fn = node_lessthan
-};
-
 /*
  * Linked list of allocated kernel objects, for iteration over all allocated
  * objects (and potentially deleting them during iteration).
@@ -163,8 +198,7 @@ static struct rbtree obj_rb_tree = {
 static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);
 
 /*
- * TODO: Write some hash table code that will replace both obj_rb_tree
- * and obj_list.
+ * TODO: Write some hash table code that will replace obj_list.
  */
 
 static size_t obj_size_get(enum k_objects otype)
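To make the worst-case reservation concrete, a worked reading of STACK_ELEMENT_DATA_SIZE() on the GEN_PRIV_STACKS + ARM_MPU path, with purely hypothetical numbers:

    /* Illustrative only: assume CONFIG_PRIVILEGED_STACK_SIZE = 1024,
     * Z_THREAD_STACK_OBJ_ALIGN(size) = 4096 and
     * Z_THREAD_STACK_SIZE_ADJUST(size) = 4096 for a 2 KiB request. Then
     * STACK_ELEMENT_DATA_SIZE(size) reserves
     *
     *     sizeof(struct z_stack_data)   bookkeeping record
     *   + 1024                          privileged stack
     *   + 4096                          worst-case alignment padding
     *   + 4096                          adjusted user stack
     *
     * so a properly aligned user stack fits wherever the heap happens to
     * place the buffer.
     */
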
@@ -193,6 +227,9 @@ static size_t obj_align_get(enum k_objects otype)
 		ret = __alignof(struct dyn_obj);
 #endif
 		break;
+	case K_OBJ_THREAD_STACK_ELEMENT:
+		ret = __alignof(struct dyn_obj_stack);
+		break;
 	default:
 		ret = __alignof(struct dyn_obj);
 		break;
@@ -201,44 +238,31 @@ static size_t obj_align_get(enum k_objects otype)
 	return ret;
 }
 
-static bool node_lessthan(struct rbnode *a, struct rbnode *b)
-{
-	return a < b;
-}
-
-static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
-{
-	return CONTAINER_OF(node, struct dyn_obj, node);
-}
-
-static inline struct rbnode *dyn_obj_to_node(void *obj)
-{
-	struct dyn_obj *dobj = CONTAINER_OF(obj, struct dyn_obj, data);
-
-	return &dobj->node;
-}
-
-static struct dyn_obj *dyn_object_find(void *obj)
+static struct dyn_obj_base *dyn_object_find(void *obj)
 {
-	struct rbnode *node;
-	struct dyn_obj *ret;
+	struct dyn_obj_base *node;
+	k_spinlock_key_t key;
 
 	/* For any dynamically allocated kernel object, the object
-	 * pointer is just a member of the containing struct dyn_obj,
-	 * so just a little arithmetic is necessary to locate the
-	 * corresponding struct rbnode
+	 * pointer is stored in the kobj.name field of the containing
+	 * struct dyn_obj_base, so walk the allocated-object list and
+	 * compare.
 	 */
-	node = dyn_obj_to_node(obj);
+	key = k_spin_lock(&lists_lock);
 
-	k_spinlock_key_t key = k_spin_lock(&lists_lock);
-	if (rb_contains(&obj_rb_tree, node)) {
-		ret = node_to_dyn_obj(node);
-	} else {
-		ret = NULL;
+	SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
+		if (node->kobj.name == obj) {
+			goto end;
+		}
 	}
+
+	/* No object found */
+	node = NULL;
+
+end:
 	k_spin_unlock(&lists_lock, key);
 
-	return ret;
+	return node;
 }
 
 /**
@@ -304,31 +328,84 @@ static void thread_idx_free(uintptr_t tidx)
 	sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
 }
 
-struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
+static struct z_object *dynamic_object_create(enum k_objects otype, size_t align,
+					      size_t size)
 {
-	struct dyn_obj *dyn;
+	struct dyn_obj_base *dyn;
 
-	dyn = z_thread_aligned_alloc(align, sizeof(*dyn) + size);
-	if (dyn == NULL) {
-		LOG_ERR("could not allocate kernel object, out of memory");
-		return NULL;
+	if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
+		struct dyn_obj_stack *stack;
+		size_t adjusted_size;
+
+		if (size == 0) {
+			return NULL;
+		}
+
+		adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
+		stack = z_thread_aligned_alloc(align, sizeof(struct dyn_obj_stack));
+		if (stack == NULL) {
+			return NULL;
+		}
+		dyn = &stack->base;
+
+		stack->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
+						     adjusted_size);
+		if (stack->data == NULL) {
+			k_free(stack);
+			return NULL;
+		}
+
+#ifdef CONFIG_GEN_PRIV_STACKS
+		struct z_stack_data *stack_data = (struct z_stack_data *)
+			((uint8_t *)stack->data + adjusted_size - sizeof(*stack_data));
+		stack_data->priv = (uint8_t *)stack->data;
+		dyn->kobj.data.stack_data = stack_data;
+#ifdef CONFIG_ARM_MPU
+		dyn->kobj.name = (void *)ROUND_UP(
+			((uint8_t *)stack->data + CONFIG_PRIVILEGED_STACK_SIZE),
+			Z_THREAD_STACK_OBJ_ALIGN(size));
+#else
+		dyn->kobj.name = stack->data;
+#endif
+#else
+		dyn->kobj.name = stack->data;
+#endif
+	} else {
+		struct dyn_obj *obj;
+
+		obj = z_thread_aligned_alloc(align,
+				sizeof(struct dyn_obj) + obj_size_get(otype) + size);
+		if (obj == NULL) {
+			return NULL;
+		}
+		dyn = &obj->base;
+		dyn->kobj.name = &obj->data;
 	}
 
-	dyn->kobj.name = &dyn->data;
-	dyn->kobj.type = K_OBJ_ANY;
+	dyn->kobj.type = otype;
 	dyn->kobj.flags = 0;
 	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);
 
 	k_spinlock_key_t key = k_spin_lock(&lists_lock);
 
-	rb_insert(&obj_rb_tree, &dyn->node);
 	sys_dlist_append(&obj_list, &dyn->dobj_list);
 	k_spin_unlock(&lists_lock, key);
 
 	return &dyn->kobj;
 }
 
-void *z_impl_k_object_alloc(enum k_objects otype)
+struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
+{
+	struct z_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);
+
+	if (obj == NULL) {
+		LOG_ERR("could not allocate kernel object, out of memory");
+	}
+
+	return obj;
+}
+
+static void *z_object_alloc(enum k_objects otype, size_t size)
 {
 	struct z_object *zo;
 	uintptr_t tidx = 0;
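On the CONFIG_ARM_MPU path above, the user-stack pointer published in kobj.name is derived from the raw buffer by skipping the privileged stack and rounding up to the MPU alignment. A self-contained sketch of that derivation (the helper and both size figures are hypothetical):

    #include <stddef.h>
    #include <stdint.h>
    #include <zephyr/sys/util.h>

    static void *user_stack_start(uint8_t *buf)
    {
            const size_t priv_size = 1024; /* stand-in for CONFIG_PRIVILEGED_STACK_SIZE */
            const size_t mpu_align = 4096; /* stand-in for Z_THREAD_STACK_OBJ_ALIGN(size) */

            /* The privileged stack sits at the start of the buffer; the user
             * stack begins at the next MPU-programmable boundary after it.
             */
            return (void *)ROUND_UP((uintptr_t)(buf + priv_size), mpu_align);
    }
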
@@ -348,7 +425,6 @@ void *z_impl_k_object_alloc(enum k_objects otype)
 	/* The following are currently not allowed at all */
 	case K_OBJ_FUTEX:			/* Lives in user memory */
 	case K_OBJ_SYS_MUTEX:			/* Lives in user memory */
-	case K_OBJ_THREAD_STACK_ELEMENT:	/* No aligned allocator */
 	case K_OBJ_NET_SOCKET:			/* Indeterminate size */
 		LOG_ERR("forbidden object type '%s' requested",
 			otype_to_str(otype));
@@ -358,15 +434,13 @@ void *z_impl_k_object_alloc(enum k_objects otype)
 		break;
 	}
 
-	zo = z_dynamic_object_aligned_create(obj_align_get(otype),
-					     obj_size_get(otype));
+	zo = dynamic_object_create(otype, obj_align_get(otype), size);
 	if (zo == NULL) {
 		if (otype == K_OBJ_THREAD) {
 			thread_idx_free(tidx);
 		}
 		return NULL;
 	}
-	zo->type = otype;
 
 	if (otype == K_OBJ_THREAD) {
 		zo->data.thread_id = tidx;
@@ -385,9 +459,19 @@ void *z_impl_k_object_alloc(enum k_objects otype)
 	return zo->name;
 }
 
+void *z_impl_k_object_alloc(enum k_objects otype)
+{
+	return z_object_alloc(otype, 0);
+}
+
+void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
+{
+	return z_object_alloc(otype, size);
+}
+
 void k_object_free(void *obj)
 {
-	struct dyn_obj *dyn;
+	struct dyn_obj_base *dyn;
 
 	/* This function is intentionally not exposed to user mode.
 	 * There's currently no robust way to track that an object isn't
@@ -398,7 +482,6 @@ void k_object_free(void *obj)
 
 	dyn = dyn_object_find(obj);
 	if (dyn != NULL) {
-		rb_remove(&obj_rb_tree, &dyn->node);
 		sys_dlist_remove(&dyn->dobj_list);
 
 		if (dyn->kobj.type == K_OBJ_THREAD) {
@@ -419,15 +502,15 @@ struct z_object *z_object_find(const void *obj)
 
 	ret = z_object_gperf_find(obj);
 	if (ret == NULL) {
-		struct dyn_obj *dynamic_obj;
+		struct dyn_obj_base *dyn;
 
 		/* The cast to pointer-to-non-const violates MISRA
 		 * 11.8 but is justified since we know dynamic objects
 		 * were not declared with a const qualifier.
 		 */
-		dynamic_obj = dyn_object_find((void *)obj);
-		if (dynamic_obj != NULL) {
-			ret = &dynamic_obj->kobj;
+		dyn = dyn_object_find((void *)obj);
+		if (dyn != NULL) {
+			ret = &dyn->kobj;
 		}
 	}
 
@@ -436,7 +519,7 @@ struct z_object *z_object_find(const void *obj)
 
 void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
 {
-	struct dyn_obj *obj, *next;
+	struct dyn_obj_base *obj, *next;
 
 	z_object_gperf_wordlist_foreach(func, context);
 
@@ -476,7 +559,7 @@ static void unref_check(struct z_object *ko, uintptr_t index)
 
 	void *vko = ko;
 
-	struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);
+	struct dyn_obj_base *dyn = CONTAINER_OF(vko, struct dyn_obj_base, kobj);
 
 	__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");
 
@@ -508,7 +591,6 @@ static void unref_check(struct z_object *ko, uintptr_t index)
 		break;
 	}
 
-	rb_remove(&obj_rb_tree, &dyn->node);
 	sys_dlist_remove(&dyn->dobj_list);
 	k_free(dyn);
 out:
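Because z_object_find() first consults the gperf-generated table of static objects and only then falls back to the dynamic-object list above, callers can treat static and dynamic objects uniformly. A hypothetical sketch using the existing k_object_access_grant() API:

    #include <zephyr/kernel.h>

    /* Hypothetical helper: 'obj' may be a statically declared kernel object
     * or one returned by k_object_alloc()/k_object_alloc_size(); the lookup
     * underneath the grant is the same either way.
     */
    static void share_with(struct k_thread *consumer, const void *obj)
    {
            k_object_access_grant(obj, consumer);
    }
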
diff --git a/kernel/userspace_handler.c b/kernel/userspace_handler.c
index 6531e83791ce..80f9325b7de2 100644
--- a/kernel/userspace_handler.c
+++ b/kernel/userspace_handler.c
@@ -65,3 +65,9 @@ static inline void *z_vrfy_k_object_alloc(enum k_objects otype)
 	return z_impl_k_object_alloc(otype);
 }
 #include <syscalls/k_object_alloc_mrsh.c>
+
+static inline void *z_vrfy_k_object_alloc_size(enum k_objects otype, size_t size)
+{
+	return z_impl_k_object_alloc_size(otype, size);
+}
+#include <syscalls/k_object_alloc_size_mrsh.c>
diff --git a/tests/kernel/threads/dynamic_thread_stack/prj.conf b/tests/kernel/threads/dynamic_thread_stack/prj.conf
index 4377c2ec60b4..07c01bd661f4 100644
--- a/tests/kernel/threads/dynamic_thread_stack/prj.conf
+++ b/tests/kernel/threads/dynamic_thread_stack/prj.conf
@@ -6,7 +6,7 @@ CONFIG_MAX_THREAD_BYTES=5
 CONFIG_DYNAMIC_THREAD=y
 CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
 CONFIG_DYNAMIC_THREAD_ALLOC=y
-CONFIG_HEAP_MEM_POOL_SIZE=16384
+CONFIG_HEAP_MEM_POOL_SIZE=20480
 CONFIG_ZTEST_STACK_SIZE=2048
 CONFIG_MAIN_STACK_SIZE=2048
diff --git a/tests/kernel/threads/dynamic_thread_stack/src/main.c b/tests/kernel/threads/dynamic_thread_stack/src/main.c
index 86848e4249ee..e72f4d2f51b4 100644
--- a/tests/kernel/threads/dynamic_thread_stack/src/main.c
+++ b/tests/kernel/threads/dynamic_thread_stack/src/main.c
@@ -17,7 +17,7 @@
 
 #define MAX_HEAP_STACKS (CONFIG_HEAP_MEM_POOL_SIZE / STACK_OBJ_SIZE)
 
-ZTEST_DMEM bool flag[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
+ZTEST_DMEM bool flag[MAX(CONFIG_DYNAMIC_THREAD_POOL_SIZE, MAX_HEAP_STACKS)];
 
 static void func(void *arg1, void *arg2, void *arg3)
 {
@@ -63,6 +63,7 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_pool)
 
 	/* spawn our threads */
 	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
+		flag[i] = false;
 		tid[i] = k_thread_create(&th[i], stack[i],
 					 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
 					 &flag[i], NULL, NULL, 0,
@@ -86,7 +87,6 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
 {
 	size_t N;
 	static k_tid_t tid[MAX_HEAP_STACKS];
-	static bool flag[MAX_HEAP_STACKS];
 	static struct k_thread th[MAX_HEAP_STACKS];
 	static k_thread_stack_t *stack[MAX_HEAP_STACKS];
 
@@ -102,18 +102,17 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
 	for (N = 0; N < MAX_HEAP_STACKS; ++N) {
 		stack[N] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
 						IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);
-		zassert_not_null(stack[N]);
-	}
-
-	if (CONFIG_DYNAMIC_THREAD_POOL_SIZE == 0) {
-		/* ensure that no more thread stacks can be allocated from the heap */
-		zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
-						     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0));
+		if (stack[N] == NULL) {
+			break;
+		}
 	}
 
 	/* spawn our threads */
 	for (size_t i = 0; i < N; ++i) {
-		tid[i] = k_thread_create(&th[i], stack[i], 0, func, &flag[i], NULL, NULL, 0,
+		flag[i] = false;
+		tid[i] = k_thread_create(&th[i], stack[i],
+					 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
+					 &flag[i], NULL, NULL, 0,
 					 K_USER | K_INHERIT_PERMS, K_NO_WAIT);
 	}
diff --git a/tests/kernel/threads/dynamic_thread_stack/testcase.yaml b/tests/kernel/threads/dynamic_thread_stack/testcase.yaml
index 3bf7d892b579..69f5651406f9 100644
--- a/tests/kernel/threads/dynamic_thread_stack/testcase.yaml
+++ b/tests/kernel/threads/dynamic_thread_stack/testcase.yaml
@@ -21,56 +21,53 @@ tests:
       - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
       - CONFIG_DYNAMIC_THREAD_ALLOC=n
       - CONFIG_USERSPACE=n
-
-  # kernel.threads.dynamic_thread.stack.no_pool.no_alloc.user:
-  #   tags: userspace
-  #   extra_configs:
-  #     # 001
-  #     - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
-  #     - CONFIG_DYNAMIC_THREAD_ALLOC=n
-  #     - CONFIG_USERSPACE=y
-
+  kernel.threads.dynamic_thread.stack.no_pool.no_alloc.user:
+    tags: userspace
+    extra_configs:
+      # 001
+      - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
+      - CONFIG_DYNAMIC_THREAD_ALLOC=n
+      - CONFIG_USERSPACE=y
   kernel.threads.dynamic_thread.stack.no_pool.alloc.no_user:
     extra_configs:
       # 010
       - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
       - CONFIG_DYNAMIC_THREAD_ALLOC=y
+      - CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
      - CONFIG_USERSPACE=n
-
-  # kernel.threads.dynamic_thread.stack.no_pool.alloc.user:
-  #   tags: userspace
-  #   extra_configs:
-  #     # 011
-  #     - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
-  #     - CONFIG_DYNAMIC_THREAD_ALLOC=y
-  #     - CONFIG_USERSPACE=y
-
+  kernel.threads.dynamic_thread.stack.no_pool.alloc.user:
+    tags: userspace
+    extra_configs:
+      # 011
+      - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
+      - CONFIG_DYNAMIC_THREAD_ALLOC=y
+      - CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
+      - CONFIG_USERSPACE=y
   kernel.threads.dynamic_thread.stack.pool.no_alloc.no_user:
    extra_configs:
       # 100
       - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
       - CONFIG_DYNAMIC_THREAD_ALLOC=n
       - CONFIG_USERSPACE=n
-
-  # kernel.threads.dynamic_thread.stack.pool.no_alloc.user:
-  #   tags: userspace
-  #   extra_configs:
-  #     # 101
-  #     - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
-  #     - CONFIG_DYNAMIC_THREAD_ALLOC=n
-  #     - CONFIG_USERSPACE=y
-
+  kernel.threads.dynamic_thread.stack.pool.no_alloc.user:
+    tags: userspace
+    extra_configs:
+      # 101
+      - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
+      - CONFIG_DYNAMIC_THREAD_ALLOC=n
+      - CONFIG_USERSPACE=y
   kernel.threads.dynamic_thread.stack.pool.alloc.no_user:
     extra_configs:
       # 110
+      - CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
       - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
       - CONFIG_DYNAMIC_THREAD_ALLOC=y
       - CONFIG_USERSPACE=n
-
-# kernel.threads.dynamic_thread.stack.pool.alloc.user:
-#   tags: userspace
-#   extra_configs:
-#     # 111
-#     - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
-#     - CONFIG_DYNAMIC_THREAD_ALLOC=y
-#     - CONFIG_USERSPACE=y
+  kernel.threads.dynamic_thread.stack.pool.alloc.user:
+    tags: userspace
+    extra_configs:
+      # 111
+      - CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
+      - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
+      - CONFIG_DYNAMIC_THREAD_ALLOC=y
+      - CONFIG_USERSPACE=y