Commit b56c720

Lai Jiangshan authored and htejun committed
workqueue: Avoid nr_active manipulation in grabbing inactive items
Currently, try_to_grab_pending() activates the inactive item and subsequently treats it as though it were a standard activated item. This approach avoids duplicating the handling logic for active and inactive items, but the premature activation of an inactive item triggers trace_workqueue_activate_work(), yielding an unintended user-space-visible side effect.

Moreover, the unnecessary increment of nr_active, which is no longer a simple counter, followed by a counteracting decrement is inefficient and complicates the code.

Just remove the nr_active manipulation code when grabbing inactive items.

Signed-off-by: Lai Jiangshan <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
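To make the counter churn concrete, here is a minimal user-space sketch of the old grab path versus the new one. This is an illustration, not kernel code: struct pwq, nr_active, and the printf() are stand-ins for pool_workqueue, its nr_active bookkeeping, and trace_workqueue_activate_work().

/*
 * Sketch only: simplified stand-ins for the kernel's structures.
 * Compile with: cc -o grab_sketch grab_sketch.c
 */
#include <stdio.h>

#define WORK_STRUCT_INACTIVE	(1UL << 0)

struct pwq {
	int nr_active;
};

/* Old path: activate the item first, then treat it as a normal active one. */
static void grab_old(struct pwq *pwq, unsigned long *work_data)
{
	if (*work_data & WORK_STRUCT_INACTIVE) {
		printf("trace: workqueue_activate_work\n");	/* visible side effect */
		pwq->nr_active++;				/* incremented here... */
		*work_data &= ~WORK_STRUCT_INACTIVE;
	}
	pwq->nr_active--;	/* ...and counteracted shortly afterwards */
}

/* New path: keep the INACTIVE bit and skip nr_active entirely. */
static void grab_new(struct pwq *pwq, unsigned long *work_data)
{
	if (!(*work_data & WORK_STRUCT_INACTIVE))
		pwq->nr_active--;	/* only genuinely active items are counted */
	/* inactive case: linked barrier items are moved; counters untouched */
}

int main(void)
{
	struct pwq pwq = { 0 };
	unsigned long wd = WORK_STRUCT_INACTIVE;

	grab_old(&pwq, &wd);	/* fires the spurious trace event */
	wd = WORK_STRUCT_INACTIVE;
	grab_new(&pwq, &wd);	/* silent: no event, no counter traffic */
	printf("nr_active after both: %d\n", pwq.nr_active);	/* prints 0 */
	return 0;
}

The net counter value is identical either way; what the patch removes is the spurious trace event and the increment/decrement round trip on what is no longer a simple counter.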
1 parent 37c2277 commit b56c720

File tree

1 file changed: +9 −33 lines


kernel/workqueue.c

Lines changed: 9 additions & 33 deletions
@@ -1683,33 +1683,6 @@ static void __pwq_activate_work(struct pool_workqueue *pwq,
 	__clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
 }
 
-/**
- * pwq_activate_work - Activate a work item if inactive
- * @pwq: pool_workqueue @work belongs to
- * @work: work item to activate
- *
- * Returns %true if activated. %false if already active.
- */
-static bool pwq_activate_work(struct pool_workqueue *pwq,
-			      struct work_struct *work)
-{
-	struct worker_pool *pool = pwq->pool;
-	struct wq_node_nr_active *nna;
-
-	lockdep_assert_held(&pool->lock);
-
-	if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE))
-		return false;
-
-	nna = wq_node_nr_active(pwq->wq, pool->node);
-	if (nna)
-		atomic_inc(&nna->nr);
-
-	pwq->nr_active++;
-	__pwq_activate_work(pwq, work);
-	return true;
-}
-
 static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
 {
 	int max = READ_ONCE(nna->max);
@@ -2116,7 +2089,7 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 	 */
 	pwq = get_work_pwq(work);
 	if (pwq && pwq->pool == pool) {
-		unsigned long work_data;
+		unsigned long work_data = *work_data_bits(work);
 
 		debug_work_deactivate(work);
 
@@ -2125,21 +2098,24 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 		 * pwq->inactive_works since a queued barrier can't be
 		 * canceled (see the comments in insert_wq_barrier()).
 		 *
-		 * An inactive work item cannot be grabbed directly because
+		 * An inactive work item cannot be deleted directly because
 		 * it might have linked barrier work items which, if left
 		 * on the inactive_works list, will confuse pwq->nr_active
-		 * management later on and cause stall. Make sure the work
-		 * item is activated before grabbing.
+		 * management later on and cause stall. Move the linked
+		 * barrier work items to the worklist when deleting the grabbed
+		 * item. Also keep WORK_STRUCT_INACTIVE in work_data, so that
+		 * it doesn't participate in nr_active management in later
+		 * pwq_dec_nr_in_flight().
 		 */
-		pwq_activate_work(pwq, work);
+		if (work_data & WORK_STRUCT_INACTIVE)
+			move_linked_works(work, &pwq->pool->worklist, NULL);
 
 		list_del_init(&work->entry);
 
 		/*
 		 * work->data points to pwq iff queued. Let's point to pool. As
 		 * this destroys work->data needed by the next step, stash it.
 		 */
-		work_data = *work_data_bits(work);
 		set_work_pool_and_keep_pending(work, pool->id,
 					       pool_offq_flags(pool));

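Why the stashed work_data must keep WORK_STRUCT_INACTIVE: the caller later hands that value to pwq_dec_nr_in_flight(), which only unwinds the active accounting for items that actually contributed to it. A rough paraphrase of that consumer follows (details elided; the exact body varies across kernel versions):

static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq,
				 unsigned long work_data)
{
	int color = get_work_color(work_data);

	/*
	 * An item grabbed while still inactive never incremented
	 * nr_active, so it must not be decremented here either.
	 */
	if (!(work_data & WORK_STRUCT_INACTIVE))
		pwq_dec_nr_active(pwq);

	pwq->nr_in_flight[color]--;

	/* ... work-color flush handling elided ... */
}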