libc: thread-safe newlib #21518
The first hunk gives every thread its own newlib reentrancy block by embedding a `struct _reent` in the kernel's per-thread base structure:

```diff
@@ -493,6 +493,10 @@ struct _thread_base {
 	u8_t cpu_mask;
 #endif
 
+#ifdef CONFIG_NEWLIB_LIBC
+	struct _reent k_reent;
+#endif
+
 	/* data returned by APIs */
 	void *swap_data;
```

Review discussion on this hunk:

> It would be desirable to add a separate newlib config symbol that enables this locking support. This symbol can be disabled for builds that are known to be single-threaded.

> If there is definitely only one thread running, I think that's a good idea.

> Yeah, this would be good; we have some use cases for disabling multithreading and using Zephyr more like a HAL (bootloaders, for example).

> What do you think about the other switch, the one that reserves memory for the locks? Is it OK to have one? Would you set the default value to 0? Actually, I don't like setting it to 0, since only the malloc_lock hooks work in that scenario, but I was concerned that existing applications could run out of memory if I chose too small a default value.
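For context on the new member: `struct _reent` is where newlib keeps a thread's library state (errno, stdio streams, strtok state, and so on). Below is a minimal sketch of initializing the per-thread instance at thread setup. `_REENT_INIT_PTR` is real newlib API, but the hook name and call site are illustrative, not part of this PR:

```c
#include <kernel.h>
#include <sys/reent.h>   /* newlib: struct _reent, _REENT_INIT_PTR */

/* Hypothetical thread-setup hook: give the new thread a freshly
 * initialized reentrancy block so its libc calls don't share state
 * (errno, stdio buffers, ...) with other threads.
 */
static void thread_reent_setup(struct k_thread *thread)
{
#ifdef CONFIG_NEWLIB_LIBC
	_REENT_INIT_PTR(&thread->base.k_reent);
#endif
}
```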
The remaining hunks are in the newlib libc hooks. New includes:

```diff
@@ -16,6 +16,9 @@
 #include <app_memory/app_memdomain.h>
 #include <init.h>
 #include <sys/sem.h>
+#include <sys/mutex.h>
+#include <stdlib.h>
+#include <sys/lock.h>
 
 #define LIBC_BSS K_APP_BMEM(z_libc_partition)
 #define LIBC_DATA K_APP_DMEM(z_libc_partition)
```
Next, `_sbrk()` drops its private heap semaphore:

```diff
@@ -239,14 +242,11 @@ __weak void _exit(int status)
 	}
 }
 
-static LIBC_DATA SYS_SEM_DEFINE(heap_sem, 1, 1);
-
 void *_sbrk(int count)
 {
 	void *ret, *ptr;
 
-	sys_sem_take(&heap_sem, K_FOREVER);
-
 #if CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
 	ptr = heap_base + heap_sz;
 #else
@@ -260,8 +260,6 @@ void *_sbrk(int count)
 		ret = (void *)-1;
 	}
 
-	sys_sem_give(&heap_sem);
-
 	return ret;
 }
 __weak FUNC_ALIAS(_sbrk, sbrk, void *);
```
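The semaphore can presumably go away because heap growth is now serialized one level up: newlib takes the malloc lock (provided below) around allocator work, including calls into `_sbrk`. Roughly, as a simplified sketch of newlib's internal call pattern rather than code from this PR or actual newlib source, with `morecore_or_reuse` as a stand-in for newlib's internal bookkeeping:

```c
/* Simplified sketch of newlib's allocator locking; morecore_or_reuse()
 * stands in for newlib's heap bookkeeping, which may call the port's
 * _sbrk() to grow the heap.
 */
void *malloc(size_t size)
{
	void *mem;

	__malloc_lock(_REENT);         /* hook provided by this PR    */
	mem = morecore_or_reuse(size); /* may call _sbrk() under lock */
	__malloc_unlock(_REENT);

	return mem;
}
```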
Finally, the lock implementations are appended after `__errno()`:

```diff
@@ -270,3 +268,162 @@ __weak int *__errno(void)
 {
 	return z_errno();
 }
```

Everything past this point is newly added. When retargetable locking is unavailable, or no memory is reserved for dynamic locks, only the malloc lock hooks are provided; otherwise a memory pool backs the dynamically created locks:

```c
#if !defined(_RETARGETABLE_LOCKING) || \
	CONFIG_NEWLIB_LIBC_DYNAMIC_LOCK_MEM_SIZE == 0

static LIBC_DATA SYS_MUTEX_DEFINE(heap_mtx);

void __malloc_lock(struct _reent *reent)
{
	sys_mutex_lock(&heap_mtx, K_FOREVER);
}

void __malloc_unlock(struct _reent *reent)
{
	sys_mutex_unlock(&heap_mtx);
}

#else
#include <sys/mempool.h>

#define NEWLIB_LOCK_POOL_SIZE WB_UP(CONFIG_NEWLIB_LIBC_DYNAMIC_LOCK_MEM_SIZE)

#define NEWLIB_LOCK_FRAG_SIZE WB_UP(sizeof(void *) + sizeof(struct k_mutex))

K_APPMEM_PARTITION_DEFINE(z_malloc_lock_partition);
#define NEWLIB_LOCK_SECTION K_APP_DMEM_SECTION(z_malloc_lock_partition)

SYS_MEM_POOL_DEFINE(z_nl_lock_pool, NULL,
		    NEWLIB_LOCK_FRAG_SIZE, NEWLIB_LOCK_POOL_SIZE,
		    1, sizeof(void *), NEWLIB_LOCK_SECTION);
```

Review discussion:

> On the `#if` guard: what happens if this block isn't compiled, i.e. someone sets the dynamic lock memory size to 0?

> On `SYS_MEM_POOL_DEFINE`: this creates a dependency between newlib and mempool that didn't exist before. Generally those have been either/or: an application will use a heap managed by mempool or by newlib, not both; now we need to include both variants. Isn't there a way to repurpose the newlib heap code to do this? And if there's not, you probably want to be looking at the Zephyr mem_slab and not mem_pool, as AFAICT all allocations are of the same object size.

> We do have a pool of objects here, and a slab would be better. However, mem slabs can't be used from user mode; sys_mem_pool can, since the whole object lives in user memory (we route it to the libc memory domain with NEWLIB_LOCK_SECTION).

> I used malloc in the first place, but it's kind of heavy. Is there any action for me?
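For comparison, the fixed-size `k_mem_slab` alternative raised above might look like the sketch below; as the reply notes, slabs can't be used from user mode, which is why the PR uses `sys_mem_pool`. Block size and count here are illustrative, not from the PR:

```c
#include <kernel.h>

/* Illustrative slab: every lock object is the same size, so a slab
 * avoids mem_pool's block-splitting overhead. Kernel mode only.
 */
#define NL_LOCK_BLOCK_SIZE  WB_UP(sizeof(void *) + sizeof(struct k_mutex))
#define NL_LOCK_BLOCK_COUNT 16

K_MEM_SLAB_DEFINE(nl_lock_slab, NL_LOCK_BLOCK_SIZE, NL_LOCK_BLOCK_COUNT,
		  sizeof(void *));

static void *nl_lock_alloc(void)
{
	void *block;

	/* K_NO_WAIT: fail immediately if the slab is exhausted */
	if (k_mem_slab_alloc(&nl_lock_slab, &block, K_NO_WAIT) != 0) {
		return NULL;
	}
	return block;
}
```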
The rest of the added code defines the static locks newlib expects, an init hook, and the dynamic `__retarget_lock_*` implementation:

```c
/* Static locks newlib references by name when retargetable locking is
 * enabled: recursive locks are sys_mutex, non-recursive ones sys_sem.
 */
struct sys_mutex __lock___sinit_recursive_mutex;
struct sys_mutex __lock___sfp_recursive_mutex;
struct sys_mutex __lock___atexit_recursive_mutex;
struct sys_mutex __lock___malloc_recursive_mutex;
struct sys_mutex __lock___env_recursive_mutex;
struct sys_sem __lock___at_quick_exit_mutex;
struct sys_sem __lock___tz_mutex;
struct sys_sem __lock___dd_hash_mutex;
struct sys_sem __lock___arc4random_mutex;

static int nl_prepare_hooks(struct device *unused)
{
	ARG_UNUSED(unused);

	sys_mem_pool_init(&z_nl_lock_pool);

	sys_mutex_init(&__lock___sinit_recursive_mutex);
	sys_mutex_init(&__lock___sfp_recursive_mutex);
	sys_mutex_init(&__lock___atexit_recursive_mutex);
	sys_mutex_init(&__lock___malloc_recursive_mutex);
	sys_mutex_init(&__lock___env_recursive_mutex);

	sys_sem_init(&__lock___at_quick_exit_mutex, 1, 1);
	sys_sem_init(&__lock___tz_mutex, 1, 1);
	sys_sem_init(&__lock___dd_hash_mutex, 1, 1);
	sys_sem_init(&__lock___arc4random_mutex, 1, 1);

	return 0;
}

SYS_INIT(nl_prepare_hooks, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

/* Dynamic non-recursive locks: sys_sem objects allocated from the pool */
void __retarget_lock_init(_LOCK_T *lock)
{
	*lock = sys_mem_pool_alloc(&z_nl_lock_pool, NEWLIB_LOCK_FRAG_SIZE);

	__ASSERT(*lock, "failed to allocate memory for newlib lock");

	if (*lock != NULL) {
		sys_sem_init((struct sys_sem *)*lock, 1, 1);
	}
}

void __retarget_lock_close(_LOCK_T lock)
{
	if (lock != NULL) {
		sys_mem_pool_free(lock);
	}
}

void __retarget_lock_acquire(_LOCK_T lock)
{
	if (lock != NULL) {
		sys_sem_take((struct sys_sem *)lock, K_FOREVER);
	}
}

int __retarget_lock_try_acquire(_LOCK_T lock)
{
	if (lock != NULL) {
		if (sys_sem_take((struct sys_sem *)lock,
				 K_NO_WAIT) == 0) {
			return 1;
		}
	}
	return 0;
}

void __retarget_lock_release(_LOCK_T lock)
{
	if (lock != NULL) {
		sys_sem_give((struct sys_sem *)lock);
	}
}

/* Dynamic recursive locks: sys_mutex objects allocated from the pool */
void __retarget_lock_init_recursive(_LOCK_T *lock)
{
	*lock = sys_mem_pool_alloc(&z_nl_lock_pool, NEWLIB_LOCK_FRAG_SIZE);

	__ASSERT(*lock, "failed to allocate memory for newlib lock");

	if (*lock != NULL) {
		sys_mutex_init((struct sys_mutex *)*lock);
	}
}

void __retarget_lock_close_recursive(_LOCK_T lock)
{
	if (lock != NULL) {
		sys_mem_pool_free(lock);
	}
}

void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
	if (lock != NULL) {
		sys_mutex_lock((struct sys_mutex *)lock, K_FOREVER);
	}
}

int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
	if (lock != NULL) {
		if (sys_mutex_lock((struct sys_mutex *)lock,
				   K_NO_WAIT) == 0) {
			return 1;
		}
	}
	return 0;
}

void __retarget_lock_release_recursive(_LOCK_T lock)
{
	if (lock != NULL) {
		sys_mutex_unlock((struct sys_mutex *)lock);
	}
}

#endif
```
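To illustrate how these hooks get exercised, here is a hedged usage sketch of one dynamically created, non-recursive lock; the explicit caller is illustrative, since in practice newlib itself makes these calls around its internal state (stdio, environment, time zones, and so on):

```c
#include <sys/lock.h>   /* newlib: _LOCK_T and the __retarget_lock_* API */

static _LOCK_T demo_lock;

void demo(void)
{
	__retarget_lock_init(&demo_lock);      /* sys_sem from the pool  */

	__retarget_lock_acquire(demo_lock);    /* blocks until available */
	/* ... critical section over some shared state ... */
	__retarget_lock_release(demo_lock);

	if (__retarget_lock_try_acquire(demo_lock)) {  /* non-blocking */
		__retarget_lock_release(demo_lock);
	}

	__retarget_lock_close(demo_lock);      /* memory back to pool    */
}
```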
Review discussion on the context-switch side of the change:

> Note that arch_swap is only used for co-operative task switching; when a task is preempted, this function is not called. _impure_ptr should instead be updated in z_arm_pendsv, which is the actual common task-switching function. Line 228 of swap_helper.S would be a good place to add this (zephyr/arch/arm/core/swap_helper.S, lines 225 to 229 at 816d2c4).
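Expressed in C rather than the swap_helper.S assembly, the idea is roughly the sketch below; `z_impure_update` is a hypothetical helper name, not code from this PR:

```c
#include <kernel.h>
#include <sys/reent.h>   /* newlib: extern struct _reent *_impure_ptr */

/* Hypothetical helper the context-switch path would call: point
 * newlib's global _impure_ptr at the incoming thread's private
 * reentrancy block before that thread resumes.
 */
static inline void z_impure_update(struct k_thread *incoming)
{
#ifdef CONFIG_NEWLIB_LIBC
	_impure_ptr = &incoming->base.k_reent;
#endif
}
```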
> Do we need to make this change in the context-switch code for all arches, or is there something special about ARM that requires _impure_ptr to be updated like this? Also, what about SMP?
> Yes, struct _reent is basically per-thread context for newlib: https://github.com/bminor/newlib/blob/b61dc22adaf82114eee3edce91cc3433bcd27fe5/newlib/libc/include/sys/reent.h#L377-L424
> For ARM, SMP is not supported at the moment. I will look into this in the future.
> These changes impact all arches, so I think we will need to close on this before it can be merged. I don't see how a global _impure_ptr updated on context switch could ever work in an SMP system; surely newlib has something for this. Does this need to be stored in thread-local storage?
> Perhaps we need to override __getreent()? (See the sketch below.)
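A sketch of what such an override might look like, assuming newlib is built with __DYNAMIC_REENT__ so that _REENT expands to a __getreent() call; as the next comment points out, the stock symbol is not weak, so this would collide at link time with an unmodified newlib:

```c
#include <kernel.h>
#include <sys/reent.h>

/* Per-thread (and therefore SMP-safe) reentrancy lookup: instead of
 * a single global _impure_ptr swapped on context switch, resolve the
 * reent block from the currently running thread on every call.
 */
struct _reent *__getreent(void)
{
	return &k_current_get()->base.k_reent;
}
```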
> Looks like that would be the correct approach for SMP: https://github.com/eblot/newlib/blob/2a63fa0fd26ffb6603f69d9e369e944fe449c246/newlib/libc/sys/linux/linuxthreads/getreent.c#L5-L10
> One problem I see is that __getreent is not declared __weak, so it would not be overridable: https://github.com/eblot/newlib/blob/2a63fa0fd26ffb6603f69d9e369e944fe449c246/newlib/libc/reent/getreent.c#L10-L14
> I wonder if Zephyr should create a separate fork of newlib for this.