Skip to content

Commit 68f8305

Browse files
Lai Jiangshan authored and htejun (Tejun Heo) committed
workqueue: Reap workers via kthread_stop() and remove detach_completion
The code to kick off the destruction of workers is now in a process context (idle_cull_fn()), so kthread_stop() can be used in the process context to replace the work of pool->detach_completion. The wakeup in wake_dying_workers() is unneeded after this change, but it is harmless, jut keep it here until next patch renames wake_dying_workers() rather than renaming it again and again. Cc: Valentin Schneider <[email protected]> Signed-off-by: Lai Jiangshan <[email protected]> Signed-off-by: Tejun Heo <[email protected]>
1 parent b56c720 commit 68f8305

File tree

1 file changed

+19
-16
lines changed

1 file changed

+19
-16
lines changed

kernel/workqueue.c

Lines changed: 19 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -216,7 +216,6 @@ struct worker_pool {
216216
struct worker *manager; /* L: purely informational */
217217
struct list_head workers; /* A: attached workers */
218218
struct list_head dying_workers; /* A: workers about to die */
219-
struct completion *detach_completion; /* all workers detached */
220219

221220
struct ida worker_ida; /* worker IDs for task name */
222221

@@ -2696,7 +2695,6 @@ static void worker_attach_to_pool(struct worker *worker,
26962695
static void worker_detach_from_pool(struct worker *worker)
26972696
{
26982697
struct worker_pool *pool = worker->pool;
2699-
struct completion *detach_completion = NULL;
27002698

27012699
/* there is one permanent BH worker per CPU which should never detach */
27022700
WARN_ON_ONCE(pool->flags & POOL_BH);
@@ -2707,15 +2705,10 @@ static void worker_detach_from_pool(struct worker *worker)
27072705
list_del(&worker->node);
27082706
worker->pool = NULL;
27092707

2710-
if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
2711-
detach_completion = pool->detach_completion;
27122708
mutex_unlock(&wq_pool_attach_mutex);
27132709

27142710
/* clear leftover flags without pool->lock after it is detached */
27152711
worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2716-
2717-
if (detach_completion)
2718-
complete(detach_completion);
27192712
}
27202713

27212714
/**
@@ -2816,10 +2809,9 @@ static void unbind_worker(struct worker *worker)
28162809

28172810
static void wake_dying_workers(struct list_head *cull_list)
28182811
{
2819-
struct worker *worker, *tmp;
2812+
struct worker *worker;
28202813

2821-
list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2822-
list_del_init(&worker->entry);
2814+
list_for_each_entry(worker, cull_list, entry) {
28232815
unbind_worker(worker);
28242816
/*
28252817
* If the worker was somehow already running, then it had to be
@@ -2835,6 +2827,17 @@ static void wake_dying_workers(struct list_head *cull_list)
28352827
}
28362828
}
28372829

2830+
static void reap_dying_workers(struct list_head *cull_list)
2831+
{
2832+
struct worker *worker, *tmp;
2833+
2834+
list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2835+
list_del_init(&worker->entry);
2836+
kthread_stop_put(worker->task);
2837+
kfree(worker);
2838+
}
2839+
}
2840+
28382841
/**
28392842
* set_worker_dying - Tag a worker for destruction
28402843
* @worker: worker to be destroyed
@@ -2866,6 +2869,9 @@ static void set_worker_dying(struct worker *worker, struct list_head *list)
28662869

28672870
list_move(&worker->entry, list);
28682871
list_move(&worker->node, &pool->dying_workers);
2872+
2873+
/* get an extra task struct reference for later kthread_stop_put() */
2874+
get_task_struct(worker->task);
28692875
}
28702876

28712877
/**
@@ -2949,6 +2955,8 @@ static void idle_cull_fn(struct work_struct *work)
29492955
raw_spin_unlock_irq(&pool->lock);
29502956
wake_dying_workers(&cull_list);
29512957
mutex_unlock(&wq_pool_attach_mutex);
2958+
2959+
reap_dying_workers(&cull_list);
29522960
}
29532961

29542962
static void send_mayday(struct work_struct *work)
@@ -3330,7 +3338,6 @@ static int worker_thread(void *__worker)
33303338
ida_free(&pool->worker_ida, worker->id);
33313339
worker_detach_from_pool(worker);
33323340
WARN_ON_ONCE(!list_empty(&worker->entry));
3333-
kfree(worker);
33343341
return 0;
33353342
}
33363343

@@ -4863,7 +4870,6 @@ static void rcu_free_pool(struct rcu_head *rcu)
48634870
*/
48644871
static void put_unbound_pool(struct worker_pool *pool)
48654872
{
4866-
DECLARE_COMPLETION_ONSTACK(detach_completion);
48674873
struct worker *worker;
48684874
LIST_HEAD(cull_list);
48694875

@@ -4917,12 +4923,9 @@ static void put_unbound_pool(struct worker_pool *pool)
49174923

49184924
wake_dying_workers(&cull_list);
49194925

4920-
if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
4921-
pool->detach_completion = &detach_completion;
49224926
mutex_unlock(&wq_pool_attach_mutex);
49234927

4924-
if (pool->detach_completion)
4925-
wait_for_completion(pool->detach_completion);
4928+
reap_dying_workers(&cull_list);
49264929

49274930
/* shut down the timers */
49284931
del_timer_sync(&pool->idle_timer);

0 commit comments

Comments
 (0)