@@ -216,7 +216,6 @@ struct worker_pool {
216
216
struct worker * manager ; /* L: purely informational */
217
217
struct list_head workers ; /* A: attached workers */
218
218
struct list_head dying_workers ; /* A: workers about to die */
219
- struct completion * detach_completion ; /* all workers detached */
220
219
221
220
struct ida worker_ida ; /* worker IDs for task name */
222
221
@@ -2696,7 +2695,6 @@ static void worker_attach_to_pool(struct worker *worker,
2696
2695
static void worker_detach_from_pool (struct worker * worker )
2697
2696
{
2698
2697
struct worker_pool * pool = worker -> pool ;
2699
- struct completion * detach_completion = NULL ;
2700
2698
2701
2699
/* there is one permanent BH worker per CPU which should never detach */
2702
2700
WARN_ON_ONCE (pool -> flags & POOL_BH );
@@ -2707,15 +2705,10 @@ static void worker_detach_from_pool(struct worker *worker)
2707
2705
list_del (& worker -> node );
2708
2706
worker -> pool = NULL ;
2709
2707
2710
- if (list_empty (& pool -> workers ) && list_empty (& pool -> dying_workers ))
2711
- detach_completion = pool -> detach_completion ;
2712
2708
mutex_unlock (& wq_pool_attach_mutex );
2713
2709
2714
2710
/* clear leftover flags without pool->lock after it is detached */
2715
2711
worker -> flags &= ~(WORKER_UNBOUND | WORKER_REBOUND );
2716
-
2717
- if (detach_completion )
2718
- complete (detach_completion );
2719
2712
}
2720
2713
2721
2714
/**
@@ -2816,10 +2809,9 @@ static void unbind_worker(struct worker *worker)
2816
2809
2817
2810
static void wake_dying_workers (struct list_head * cull_list )
2818
2811
{
2819
- struct worker * worker , * tmp ;
2812
+ struct worker * worker ;
2820
2813
2821
- list_for_each_entry_safe (worker , tmp , cull_list , entry ) {
2822
- list_del_init (& worker -> entry );
2814
+ list_for_each_entry (worker , cull_list , entry ) {
2823
2815
unbind_worker (worker );
2824
2816
/*
2825
2817
* If the worker was somehow already running, then it had to be
@@ -2835,6 +2827,17 @@ static void wake_dying_workers(struct list_head *cull_list)
2835
2827
}
2836
2828
}
2837
2829
2830
/**
 * reap_dying_workers - destroy the workers on a cull list
 * @cull_list: list of dying workers, linked through worker->entry
 *
 * For each culled worker: unlink it from @cull_list, stop its kthread and
 * drop the extra task_struct reference that set_worker_dying() took for
 * exactly this purpose (kthread_stop_put() does both), then free the
 * worker struct.
 *
 * NOTE(review): both visible callers (idle_cull_fn() and put_unbound_pool())
 * invoke this only after releasing wq_pool_attach_mutex — presumably because
 * kthread_stop() can sleep waiting for the worker to exit; confirm before
 * calling with that mutex held.
 */
static void reap_dying_workers(struct list_head *cull_list)
{
	struct worker *worker, *tmp;

	/* _safe iteration: each worker is freed inside the loop body */
	list_for_each_entry_safe(worker, tmp, cull_list, entry) {
		list_del_init(&worker->entry);
		kthread_stop_put(worker->task);
		kfree(worker);
	}
}
2840
+
2838
2841
/**
2839
2842
* set_worker_dying - Tag a worker for destruction
2840
2843
* @worker: worker to be destroyed
@@ -2866,6 +2869,9 @@ static void set_worker_dying(struct worker *worker, struct list_head *list)
2866
2869
2867
2870
list_move (& worker -> entry , list );
2868
2871
list_move (& worker -> node , & pool -> dying_workers );
2872
+
2873
+ /* get an extra task struct reference for later kthread_stop_put() */
2874
+ get_task_struct (worker -> task );
2869
2875
}
2870
2876
2871
2877
/**
@@ -2949,6 +2955,8 @@ static void idle_cull_fn(struct work_struct *work)
2949
2955
raw_spin_unlock_irq (& pool -> lock );
2950
2956
wake_dying_workers (& cull_list );
2951
2957
mutex_unlock (& wq_pool_attach_mutex );
2958
+
2959
+ reap_dying_workers (& cull_list );
2952
2960
}
2953
2961
2954
2962
static void send_mayday (struct work_struct * work )
@@ -3330,7 +3338,6 @@ static int worker_thread(void *__worker)
3330
3338
ida_free (& pool -> worker_ida , worker -> id );
3331
3339
worker_detach_from_pool (worker );
3332
3340
WARN_ON_ONCE (!list_empty (& worker -> entry ));
3333
- kfree (worker );
3334
3341
return 0 ;
3335
3342
}
3336
3343
@@ -4863,7 +4870,6 @@ static void rcu_free_pool(struct rcu_head *rcu)
4863
4870
*/
4864
4871
static void put_unbound_pool (struct worker_pool * pool )
4865
4872
{
4866
- DECLARE_COMPLETION_ONSTACK (detach_completion );
4867
4873
struct worker * worker ;
4868
4874
LIST_HEAD (cull_list );
4869
4875
@@ -4917,12 +4923,9 @@ static void put_unbound_pool(struct worker_pool *pool)
4917
4923
4918
4924
wake_dying_workers (& cull_list );
4919
4925
4920
- if (!list_empty (& pool -> workers ) || !list_empty (& pool -> dying_workers ))
4921
- pool -> detach_completion = & detach_completion ;
4922
4926
mutex_unlock (& wq_pool_attach_mutex );
4923
4927
4924
- if (pool -> detach_completion )
4925
- wait_for_completion (pool -> detach_completion );
4928
+ reap_dying_workers (& cull_list );
4926
4929
4927
4930
/* shut down the timers */
4928
4931
del_timer_sync (& pool -> idle_timer );
0 commit comments