 #include <zephyr/posix/pthread.h>
 #include <zephyr/sys/slist.h>
 
+#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
+#define DYNAMIC_STACK_SIZE CONFIG_DYNAMIC_THREAD_STACK_SIZE
+#else
+#define DYNAMIC_STACK_SIZE 0
+#endif
+
 #define PTHREAD_INIT_FLAGS PTHREAD_CANCEL_ENABLE
 #define PTHREAD_CANCELED ((void *) -1)
 
@@ -34,6 +40,7 @@ BUILD_ASSERT((PTHREAD_CREATE_DETACHED == 0 || PTHREAD_CREATE_JOINABLE == 0) &&
 BUILD_ASSERT((PTHREAD_CANCEL_ENABLE == 0 || PTHREAD_CANCEL_DISABLE == 0) &&
 	     (PTHREAD_CANCEL_ENABLE == 1 || PTHREAD_CANCEL_DISABLE == 1));
 
+static void posix_thread_recycle(void);
 static sys_dlist_t ready_q = SYS_DLIST_STATIC_INIT(&ready_q);
 static sys_dlist_t run_q = SYS_DLIST_STATIC_INIT(&run_q);
 static sys_dlist_t done_q = SYS_DLIST_STATIC_INIT(&done_q);
@@ -205,13 +212,13 @@ int pthread_attr_setstack(pthread_attr_t *_attr, void *stackaddr, size_t stacksi
 
 static bool pthread_attr_is_valid(const struct pthread_attr *attr)
 {
-	/*
-	 * FIXME: Pthread attribute must be non-null and it provides stack
-	 * pointer and stack size. So even though POSIX 1003.1 spec accepts
-	 * attrib as NULL but zephyr needs it initialized with valid stack.
-	 */
-	if (attr == NULL || attr->initialized == 0U || attr->stack == NULL ||
-	    attr->stacksize == 0) {
+	/* auto-alloc thread stack */
+	if (attr == NULL) {
+		return true;
+	}
+
+	/* caller-provided thread stack */
+	if (attr->initialized == 0U || attr->stack == NULL || attr->stacksize == 0) {
 		return false;
 	}
 
@@ -234,6 +241,13 @@ static bool pthread_attr_is_valid(const struct pthread_attr *attr)
 	return true;
 }
 
+static void posix_thread_recycle_work_handler(struct k_work *work)
+{
+	ARG_UNUSED(work);
+	posix_thread_recycle();
+}
+static K_WORK_DELAYABLE_DEFINE(posix_thread_recycle_work, posix_thread_recycle_work_handler);
+
 static void posix_thread_finalize(struct posix_thread *t, void *retval)
 {
 	sys_snode_t *node_l;
@@ -259,6 +273,9 @@ static void posix_thread_finalize(struct posix_thread *t, void *retval)
 	t->retval = retval;
 	k_spin_unlock(&pthread_pool_lock, key);
 
+	/* trigger recycle work */
+	(void)k_work_schedule(&posix_thread_recycle_work, K_MSEC(CONFIG_PTHREAD_RECYCLER_DELAY_MS));
+
 	/* abort the underlying k_thread */
 	k_thread_abort(&t->thread);
 }
@@ -283,6 +300,45 @@ static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3)
 	CODE_UNREACHABLE;
 }
 
+static void posix_thread_recycle(void)
+{
+	k_spinlock_key_t key;
+	struct posix_thread *t;
+	struct posix_thread *safe_t;
+	sys_dlist_t recyclables = SYS_DLIST_STATIC_INIT(&recyclables);
+
+	key = k_spin_lock(&pthread_pool_lock);
+	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&done_q, t, safe_t, q_node) {
+		if (t->detachstate == PTHREAD_CREATE_JOINABLE) {
+			/* thread has not been joined yet */
+			continue;
+		}
+
+		sys_dlist_remove(&t->q_node);
+		sys_dlist_append(&recyclables, &t->q_node);
+	}
+	k_spin_unlock(&pthread_pool_lock, key);
+
+	if (sys_dlist_is_empty(&recyclables)) {
+		return;
+	}
+
+	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD)) {
+		SYS_DLIST_FOR_EACH_CONTAINER(&recyclables, t, q_node) {
+			if (t->dynamic_stack != NULL) {
+				(void)k_thread_stack_free(t->dynamic_stack);
+				t->dynamic_stack = NULL;
+			}
+		}
+	}
+
+	key = k_spin_lock(&pthread_pool_lock);
+	while (!sys_dlist_is_empty(&recyclables)) {
+		sys_dlist_append(&ready_q, sys_dlist_get(&recyclables));
+	}
+	k_spin_unlock(&pthread_pool_lock, key);
+}
+
 /**
  * @brief Create a new thread.
  *
@@ -297,32 +353,33 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou
 	int err;
 	k_spinlock_key_t key;
 	pthread_barrier_t barrier;
-	struct posix_thread *safe_t;
 	struct posix_thread *t = NULL;
-	const struct pthread_attr *attr = (const struct pthread_attr *)_attr;
+	struct pthread_attr attr_storage = init_pthread_attrs;
+	struct pthread_attr *attr = (struct pthread_attr *)_attr;
 
 	if (!pthread_attr_is_valid(attr)) {
 		return EINVAL;
 	}
 
+	if (attr == NULL) {
+		attr = &attr_storage;
+		attr->stacksize = DYNAMIC_STACK_SIZE;
+		attr->stack =
+			k_thread_stack_alloc(attr->stacksize, k_is_user_context() ? K_USER : 0);
+		if (attr->stack == NULL) {
+			return EAGAIN;
+		}
+	} else {
+		__ASSERT_NO_MSG(attr != &attr_storage);
+	}
+
+	/* reclaim resources greedily */
+	posix_thread_recycle();
+
 	key = k_spin_lock(&pthread_pool_lock);
 	if (!sys_dlist_is_empty(&ready_q)) {
-		/* spawn thread 't' directly from ready_q */
 		t = CONTAINER_OF(sys_dlist_get(&ready_q), struct posix_thread, q_node);
-	} else {
-		SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&done_q, t, safe_t, q_node) {
-			if (t->detachstate == PTHREAD_CREATE_JOINABLE) {
-				/* thread has not been joined yet */
-				continue;
-			}
 
-			/* spawn thread 't' from done_q */
-			sys_dlist_remove(&t->q_node);
-			break;
-		}
-	}
-
-	if (t != NULL) {
 		/* initialize thread state */
 		sys_dlist_append(&run_q, &t->q_node);
 		t->qid = POSIX_THREAD_RUN_Q;
@@ -332,12 +389,22 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou
 		}
 		t->cancel_pending = false;
 		sys_slist_init(&t->key_list);
+		t->dynamic_stack = _attr == NULL ? attr->stack : NULL;
 	}
 	k_spin_unlock(&pthread_pool_lock, key);
 
+	if (t == NULL) {
+		/* no threads are ready */
+		return EAGAIN;
+	}
+
 	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
 		err = pthread_barrier_init(&barrier, NULL, 2);
 		if (err != 0) {
+			if (t->dynamic_stack != NULL) {
+				(void)k_thread_stack_free(attr->stack);
+			}
+
 			/* cannot allocate barrier. move thread back to ready_q */
 			key = k_spin_lock(&pthread_pool_lock);
 			sys_dlist_remove(&t->q_node);
@@ -348,11 +415,6 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou
 		}
 	}
 
-	if (t == NULL) {
-		/* no threads are ready */
-		return EAGAIN;
-	}
-
 	/* spawn the thread */
 	k_thread_create(&t->thread, attr->stack, attr->stacksize, zephyr_thread_wrapper,
 			(void *)arg, threadroutine,
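
With this change applied and CONFIG_DYNAMIC_THREAD / CONFIG_DYNAMIC_THREAD_STACK_SIZE enabled, a caller can pass a NULL attribute and let pthread_create() allocate the stack itself. A minimal usage sketch, not part of this diff; the worker() and spawn_worker() names are illustrative only:

#include <zephyr/posix/pthread.h>

/* Illustrative thread entry point. */
static void *worker(void *arg)
{
	(void)arg;
	/* ... thread body ... */
	return NULL;
}

int spawn_worker(void)
{
	pthread_t tid;

	/* NULL attr: the stack is allocated internally with
	 * k_thread_stack_alloc() using DYNAMIC_STACK_SIZE and is freed
	 * later by posix_thread_recycle() once the thread is recycled.
	 */
	int ret = pthread_create(&tid, NULL, worker, NULL);

	if (ret != 0) {
		return ret; /* EINVAL or EAGAIN */
	}

	return pthread_join(tid, NULL);
}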