
Commit b2efa05

htejun authored and axboe committed
block, cfq: unlink cfq_io_context's immediately
cic is association between io_context and request_queue. A cic is linked from both ioc and q and should be destroyed when either one goes away. As ioc and q both have their own locks, locking becomes a bit complex - both orders work for removal from one but not from the other.

Currently, cfq tries to circumvent this locking order issue with RCU. ioc->lock nests inside queue_lock but the radix tree and cic's are also protected by RCU allowing either side to walk their lists without grabbing lock.

This rather unconventional use of RCU quickly devolves into extremely fragile convolution. e.g. The following is from cfqd going away too soon after ioc and q exits raced.

general protection fault: 0000 [#1] PREEMPT SMP
CPU 2
Modules linked in:
[   88.503444]
Pid: 599, comm: hexdump Not tainted 3.1.0-rc10-work+ #158 Bochs Bochs
RIP: 0010:[<ffffffff81397628>]  [<ffffffff81397628>] cfq_exit_single_io_context+0x58/0xf0
...
Call Trace:
 [<ffffffff81395a4a>] call_for_each_cic+0x5a/0x90
 [<ffffffff81395ab5>] cfq_exit_io_context+0x15/0x20
 [<ffffffff81389130>] exit_io_context+0x100/0x140
 [<ffffffff81098a29>] do_exit+0x579/0x850
 [<ffffffff81098d5b>] do_group_exit+0x5b/0xd0
 [<ffffffff81098de7>] sys_exit_group+0x17/0x20
 [<ffffffff81b02f2b>] system_call_fastpath+0x16/0x1b

The only real hot path here is cic lookup during request initialization and avoiding extra locking requires very confined use of RCU. This patch makes cic removal from both ioc and request_queue perform double-locking and unlink immediately.

* From q side, the change is almost trivial as ioc->lock nests inside queue_lock. It just needs to grab each ioc->lock as it walks cic_list and unlink it.

* From ioc side, it's a bit more difficult because of inversed lock order. ioc needs its lock to walk its cic_list but can't grab the matching queue_lock and needs to perform unlock-relock dancing.

  Unlinking is now wholly done from put_io_context() and fast path is optimized by using the queue_lock the caller already holds, which is by far the most common case. If the ioc accessed multiple devices, it tries with trylock. In unlikely cases of fast path failure, it falls back to full double-locking dance from workqueue.

Double-locking isn't the prettiest thing in the world but it's *far* simpler and more understandable than RCU trick without adding any meaningful overhead.

This still leaves a lot of now unnecessary RCU logics. Future patches will trim them.

-v2: Vivek pointed out that cic->q was being dereferenced after cic->release() was called. Updated to use local variable @this_q instead.

Signed-off-by: Tejun Heo <[email protected]>
Cc: Vivek Goyal <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
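The two-stage release described above can be sketched outside the kernel. Below is a minimal userspace model using pthreads; every name in it (owner, target, link, put_owner_fast, release_worker) is invented for illustration and none of it is part of the patch. It only mirrors the pattern of "use the lock the caller holds or trylock, and defer whatever is left to a worker that takes the locks in the correct order", builds with cc -pthread, and is a sketch of the idea, not the kernel implementation.

/*
 * Minimal userspace model of the two-stage unlink described above.  All
 * names (owner, target, link, put_owner_fast, release_worker) are invented
 * for illustration; the kernel objects are ioc, request_queue and cic.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct target { pthread_mutex_t lock; };               /* "queue_lock": outer lock */
struct link { struct target *t; struct link *next; };  /* like a cic */
struct owner {                                          /* like an io_context */
        pthread_mutex_t lock;                           /* inner lock */
        struct link *links;                             /* protected by owner->lock */
};

/* Both owner->lock and o->links->t->lock are held here; the kernel would
 * call cic->exit() and cic->release() at this point. */
static void unlink_first(struct owner *o)
{
        o->links = o->links->next;
}

/* Fast path: reuse @locked (a lock the caller already holds) and trylock any
 * other target so the outer lock is never waited for inside the inner one.
 * Returns true when every link has been unlinked.  (Simplified: the kernel
 * keeps the last queue_lock held across consecutive cic's of one queue.) */
static bool put_owner_fast(struct owner *o, struct target *locked)
{
        bool done;

        pthread_mutex_lock(&o->lock);
        while (o->links) {
                struct target *t = o->links->t;

                if (t != locked && pthread_mutex_trylock(&t->lock) != 0)
                        break;                  /* would invert the lock order */
                unlink_first(o);
                if (t != locked)
                        pthread_mutex_unlock(&t->lock);
        }
        done = (o->links == NULL);
        pthread_mutex_unlock(&o->lock);
        return done;
}

/* Slow path, run with no lock held (a workqueue in the kernel): peek at the
 * first link, drop the inner lock, retake both locks in outer-then-inner
 * order and restart.  The kernel additionally pins the queue with
 * __blk_get_queue() before dropping ioc->lock so it cannot vanish. */
static void release_worker(struct owner *o)
{
        for (;;) {
                struct target *t;

                pthread_mutex_lock(&o->lock);
                if (!o->links) {
                        pthread_mutex_unlock(&o->lock);
                        return;
                }
                t = o->links->t;
                pthread_mutex_unlock(&o->lock);

                pthread_mutex_lock(&t->lock);
                pthread_mutex_lock(&o->lock);
                if (o->links && o->links->t == t)
                        unlink_first(o);
                pthread_mutex_unlock(&o->lock);
                pthread_mutex_unlock(&t->lock);
        }
}

int main(void)
{
        struct target t1 = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct target t2 = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct link l2 = { .t = &t2, .next = NULL };
        struct link l1 = { .t = &t1, .next = &l2 };
        struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER, .links = &l1 };

        pthread_mutex_lock(&t1.lock);           /* caller already holds t1's lock */
        pthread_mutex_lock(&t2.lock);           /* simulate contention on t2 */

        if (!put_owner_fast(&o, &t1))
                printf("fast path deferred some links\n");

        pthread_mutex_unlock(&t2.lock);
        pthread_mutex_unlock(&t1.lock);

        release_worker(&o);                     /* kernel: schedule_work() */
        printf("all links gone: %s\n", o.links ? "no" : "yes");
        return 0;
}

Running it prints that the fast path deferred the contended link and that the worker then finished the job, the same split of work as put_io_context() and ioc_release_fn() in the block/blk-ioc.c hunk below.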
1 parent f1a4f4d · commit b2efa05

File tree

7 files changed: +159, -72 lines

block/blk-cgroup.c

Lines changed: 1 addition & 1 deletion
@@ -1649,7 +1649,7 @@ static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
 	if (ioc) {
 		ioc_cgroup_changed(ioc);
-		put_io_context(ioc);
+		put_io_context(ioc, NULL);
 	}
 }

block/blk-ioc.c

Lines changed: 137 additions & 29 deletions
@@ -29,55 +29,164 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);

-static void cfq_dtor(struct io_context *ioc)
+/*
+ * Releasing ioc may nest into another put_io_context() leading to nested
+ * fast path release. As the ioc's can't be the same, this is okay but
+ * makes lockdep whine. Keep track of nesting and use it as subclass.
+ */
+#ifdef CONFIG_LOCKDEP
+#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
+#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
+#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
+#else
+#define ioc_release_depth(q)		0
+#define ioc_release_depth_inc(q)	do { } while (0)
+#define ioc_release_depth_dec(q)	do { } while (0)
+#endif
+
+/*
+ * Slow path for ioc release in put_io_context(). Performs double-lock
+ * dancing to unlink all cic's and then frees ioc.
+ */
+static void ioc_release_fn(struct work_struct *work)
 {
-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	struct io_context *ioc = container_of(work, struct io_context,
+					      release_work);
+	struct request_queue *last_q = NULL;
+
+	spin_lock_irq(&ioc->lock);
+
+	while (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
+							 struct cfq_io_context,
+							 cic_list);
+		struct request_queue *this_q = cic->q;
+
+		if (this_q != last_q) {
+			/*
+			 * Need to switch to @this_q. Once we release
+			 * @ioc->lock, it can go away along with @cic.
+			 * Hold on to it.
+			 */
+			__blk_get_queue(this_q);
+
+			/*
+			 * blk_put_queue() might sleep thanks to kobject
+			 * idiocy. Always release both locks, put and
+			 * restart.
+			 */
+			if (last_q) {
+				spin_unlock(last_q->queue_lock);
+				spin_unlock_irq(&ioc->lock);
+				blk_put_queue(last_q);
+			} else {
+				spin_unlock_irq(&ioc->lock);
+			}
+
+			last_q = this_q;
+			spin_lock_irq(this_q->queue_lock);
+			spin_lock(&ioc->lock);
+			continue;
+		}
+		ioc_release_depth_inc(this_q);
+		cic->exit(cic);
+		cic->release(cic);
+		ioc_release_depth_dec(this_q);
+	}

-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->dtor(ioc);
+	if (last_q) {
+		spin_unlock(last_q->queue_lock);
+		spin_unlock_irq(&ioc->lock);
+		blk_put_queue(last_q);
+	} else {
+		spin_unlock_irq(&ioc->lock);
 	}
+
+	kmem_cache_free(iocontext_cachep, ioc);
 }

 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
+ * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero.
+ * zero. If the caller is holding queue_lock of a queue, it can indicate
+ * that with @locked_q. This is an optimization hint and the caller is
+ * allowed to pass in %NULL even when it's holding a queue_lock.
  */
-void put_io_context(struct io_context *ioc)
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 {
+	struct request_queue *last_q = locked_q;
+	unsigned long flags;
+
 	if (ioc == NULL)
 		return;

 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	if (locked_q)
+		lockdep_assert_held(locked_q->queue_lock);

 	if (!atomic_long_dec_and_test(&ioc->refcount))
 		return;

-	rcu_read_lock();
-	cfq_dtor(ioc);
-	rcu_read_unlock();
-
-	kmem_cache_free(iocontext_cachep, ioc);
-}
-EXPORT_SYMBOL(put_io_context);
+	/*
+	 * Destroy @ioc. This is a bit messy because cic's are chained
+	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
+	 * The inner ioc->lock should be held to walk our cic_list and then
+	 * for each cic the outer matching queue_lock should be grabbed.
+	 * ie. We need to do reverse-order double lock dancing.
+	 *
+	 * Another twist is that we are often called with one of the
+	 * matching queue_locks held as indicated by @locked_q, which
+	 * prevents performing double-lock dance for other queues.
+	 *
+	 * So, we do it in two stages. The fast path uses the queue_lock
+	 * the caller is holding and, if other queues need to be accessed,
+	 * uses trylock to avoid introducing locking dependency. This can
+	 * handle most cases, especially if @ioc was performing IO on only
+	 * single device.
+	 *
+	 * If trylock doesn't cut it, we defer to @ioc->release_work which
+	 * can do all the double-locking dancing.
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags,
+				 ioc_release_depth(locked_q));
+
+	while (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
+							 struct cfq_io_context,
+							 cic_list);
+		struct request_queue *this_q = cic->q;
+
+		if (this_q != last_q) {
+			if (last_q && last_q != locked_q)
+				spin_unlock(last_q->queue_lock);
+			last_q = NULL;
+
+			if (!spin_trylock(this_q->queue_lock))
+				break;
+			last_q = this_q;
+			continue;
+		}
+		ioc_release_depth_inc(this_q);
+		cic->exit(cic);
+		cic->release(cic);
+		ioc_release_depth_dec(this_q);
+	}

-static void cfq_exit(struct io_context *ioc)
-{
-	rcu_read_lock();
+	if (last_q && last_q != locked_q)
+		spin_unlock(last_q->queue_lock);

-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	spin_unlock_irqrestore(&ioc->lock, flags);

-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->exit(ioc);
-	}
-	rcu_read_unlock();
+	/* if no cic's left, we're done; otherwise, kick release_work */
+	if (hlist_empty(&ioc->cic_list))
+		kmem_cache_free(iocontext_cachep, ioc);
+	else
+		schedule_work(&ioc->release_work);
 }
+EXPORT_SYMBOL(put_io_context);

 /* Called by the exiting task */
 void exit_io_context(struct task_struct *task)
@@ -92,10 +201,8 @@ void exit_io_context(struct task_struct *task)
 	task->io_context = NULL;
 	task_unlock(task);

-	if (atomic_dec_and_test(&ioc->nr_tasks))
-		cfq_exit(ioc);
-
-	put_io_context(ioc);
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context(ioc, NULL);
 }

 static struct io_context *create_task_io_context(struct task_struct *task,
@@ -115,6 +222,7 @@ static struct io_context *create_task_io_context(struct task_struct *task,
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->cic_list);
+	INIT_WORK(&ioc->release_work, ioc_release_fn);

 	/* try to install, somebody might already have beaten us to it */
 	task_lock(task);
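For reference, the calling convention of the new interface, shown here as an illustrative fragment (the identifiers cic, cfqd and ioc come from the surrounding hunks; this is not standalone, compilable code):

	/* Caller holds this queue's queue_lock (cfq slice/request paths):
	 * pass it so the fast path can unlink that queue's cic's in place. */
	put_io_context(cic->ioc, cfqd->queue);

	/* No queue_lock held (ioprio, cgroup attach, fork's copy_io): pass
	 * NULL; trylock or the deferred release_work does the unlinking. */
	put_io_context(ioc, NULL);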

block/cfq-iosched.c

Lines changed: 8 additions & 36 deletions
@@ -1778,7 +1778,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqd->active_queue = NULL;

 	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->ioc);
+		put_io_context(cfqd->active_cic->ioc, cfqd->queue);
 		cfqd->active_cic = NULL;
 	}
 }
@@ -2812,38 +2812,6 @@ static void cfq_exit_cic(struct cfq_io_context *cic)
 	}
 }

-static void cfq_exit_single_io_context(struct io_context *ioc,
-				       struct cfq_io_context *cic)
-{
-	struct cfq_data *cfqd = cic_to_cfqd(cic);
-
-	if (cfqd) {
-		struct request_queue *q = cfqd->queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-
-		/*
-		 * Ensure we get a fresh copy of the ->key to prevent
-		 * race between exiting task and queue
-		 */
-		smp_read_barrier_depends();
-		if (cic->key == cfqd)
-			cfq_exit_cic(cic);
-
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-}
-
-/*
- * The process that ioc belongs to has exited, we need to clean up
- * and put the internal structures we have that belongs to that process.
- */
-static void cfq_exit_io_context(struct io_context *ioc)
-{
-	call_for_each_cic(ioc, cfq_exit_single_io_context);
-}
-
 static struct cfq_io_context *
 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
@@ -2855,8 +2823,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	cic->ttime.last_end_request = jiffies;
 	INIT_LIST_HEAD(&cic->queue_list);
 	INIT_HLIST_NODE(&cic->cic_list);
-	cic->dtor = cfq_free_io_context;
-	cic->exit = cfq_exit_io_context;
+	cic->exit = cfq_exit_cic;
+	cic->release = cfq_release_cic;
 	elv_ioc_count_inc(cfq_ioc_count);
 }

@@ -3726,7 +3694,7 @@ static void cfq_put_request(struct request *rq)
 	BUG_ON(!cfqq->allocated[rw]);
 	cfqq->allocated[rw]--;

-	put_io_context(RQ_CIC(rq)->ioc);
+	put_io_context(RQ_CIC(rq)->ioc, cfqq->cfqd->queue);

 	rq->elevator_private[0] = NULL;
 	rq->elevator_private[1] = NULL;
@@ -3937,8 +3905,12 @@ static void cfq_exit_queue(struct elevator_queue *e)
 		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
 							struct cfq_io_context,
 							queue_list);
+		struct io_context *ioc = cic->ioc;

+		spin_lock(&ioc->lock);
 		cfq_exit_cic(cic);
+		cfq_release_cic(cic);
+		spin_unlock(&ioc->lock);
 	}

 	cfq_put_async_queues(cfqd);

fs/ioprio.c

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 	if (ioc) {
 		ioc_ioprio_changed(ioc, ioprio);
-		put_io_context(ioc);
+		put_io_context(ioc, NULL);
 	}

 	return err;

include/linux/blkdev.h

Lines changed: 3 additions & 0 deletions
@@ -393,6 +393,9 @@ struct request_queue {
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
+#ifdef CONFIG_LOCKDEP
+	int			ioc_release_depth;
+#endif
 };

 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */

include/linux/iocontext.h

Lines changed: 8 additions & 4 deletions
@@ -3,6 +3,7 @@

 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>

 struct cfq_queue;
 struct cfq_ttime {
@@ -33,8 +34,8 @@ struct cfq_io_context {

 	unsigned long changed;

-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
+	void (*exit)(struct cfq_io_context *);
+	void (*release)(struct cfq_io_context *);

 	struct rcu_head rcu_head;
 };
@@ -61,6 +62,8 @@ struct io_context {
 	struct radix_tree_root radix_root;
 	struct hlist_head cic_list;
 	void __rcu *ioc_data;
+
+	struct work_struct release_work;
 };

 static inline struct io_context *ioc_task_link(struct io_context *ioc)
@@ -79,15 +82,16 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)

 struct task_struct;
 #ifdef CONFIG_BLOCK
-void put_io_context(struct io_context *ioc);
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
 void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
 #else
 struct io_context;
-static inline void put_io_context(struct io_context *ioc) { }
+static inline void put_io_context(struct io_context *ioc,
+				  struct request_queue *locked_q) { }
 static inline void exit_io_context(struct task_struct *task) { }
 #endif

kernel/fork.c

Lines changed: 1 addition & 1 deletion
@@ -887,7 +887,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 			return -ENOMEM;

 		new_ioc->ioprio = ioc->ioprio;
-		put_io_context(new_ioc);
+		put_io_context(new_ioc, NULL);
 	}
 #endif
 	return 0;
