Skip to content

Commit 8323f26

Browse files
Peter Zijlstra and Ingo Molnar
Peter Zijlstra
authored and
Ingo Molnar
committed
sched: Fix race in task_group()
Stefan reported a crash on a kernel before a3e5d10 ("sched: Don't call task_group() too many times in set_task_rq()"); he found the reason to be that the multiple task_group() invocations in set_task_rq() returned different values. Looking at all that I found a lack of serialization and plain wrong comments. The below tries to fix it using an extra pointer which is updated under the appropriate scheduler locks. It's not pretty, but I can't really see another way given how all the cgroup stuff works.

Reported-and-tested-by: Stefan Bader <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/1340364965.18025.71.camel@twins
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 88b8dac commit 8323f26

File tree

4 files changed

+33
-16
lines changed

4 files changed

+33
-16
lines changed

include/linux/init_task.h

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,8 +123,17 @@ extern struct group_info init_groups;
123123

124124
extern struct cred init_cred;
125125

126+
extern struct task_group root_task_group;
127+
128+
#ifdef CONFIG_CGROUP_SCHED
129+
# define INIT_CGROUP_SCHED(tsk) \
130+
.sched_task_group = &root_task_group,
131+
#else
132+
# define INIT_CGROUP_SCHED(tsk)
133+
#endif
134+
126135
#ifdef CONFIG_PERF_EVENTS
127-
# define INIT_PERF_EVENTS(tsk) \
136+
# define INIT_PERF_EVENTS(tsk) \
128137
.perf_event_mutex = \
129138
__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
130139
.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
@@ -161,6 +170,7 @@ extern struct cred init_cred;
161170
}, \
162171
.tasks = LIST_HEAD_INIT(tsk.tasks), \
163172
INIT_PUSHABLE_TASKS(tsk) \
173+
INIT_CGROUP_SCHED(tsk) \
164174
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
165175
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
166176
.real_parent = &tsk, \

include/linux/sched.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1245,6 +1245,9 @@ struct task_struct {
12451245
const struct sched_class *sched_class;
12461246
struct sched_entity se;
12471247
struct sched_rt_entity rt;
1248+
#ifdef CONFIG_CGROUP_SCHED
1249+
struct task_group *sched_task_group;
1250+
#endif
12481251

12491252
#ifdef CONFIG_PREEMPT_NOTIFIERS
12501253
/* list of struct preempt_notifier: */
@@ -2724,7 +2727,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
27242727
extern long sched_group_rt_period(struct task_group *tg);
27252728
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
27262729
#endif
2727-
#endif
2730+
#endif /* CONFIG_CGROUP_SCHED */
27282731

27292732
extern int task_can_switch_user(struct user_struct *up,
27302733
struct task_struct *tsk);

kernel/sched/core.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1096,7 +1096,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
10961096
* a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
10971097
*
10981098
* sched_move_task() holds both and thus holding either pins the cgroup,
1099-
* see set_task_rq().
1099+
* see task_group().
11001100
*
11011101
* Furthermore, all task_rq users should acquire both locks, see
11021102
* task_rq_lock().
@@ -7658,6 +7658,7 @@ void sched_destroy_group(struct task_group *tg)
76587658
*/
76597659
void sched_move_task(struct task_struct *tsk)
76607660
{
7661+
struct task_group *tg;
76617662
int on_rq, running;
76627663
unsigned long flags;
76637664
struct rq *rq;
@@ -7672,6 +7673,12 @@ void sched_move_task(struct task_struct *tsk)
76727673
if (unlikely(running))
76737674
tsk->sched_class->put_prev_task(rq, tsk);
76747675

7676+
tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
7677+
lockdep_is_held(&tsk->sighand->siglock)),
7678+
struct task_group, css);
7679+
tg = autogroup_task_group(tsk, tg);
7680+
tsk->sched_task_group = tg;
7681+
76757682
#ifdef CONFIG_FAIR_GROUP_SCHED
76767683
if (tsk->sched_class->task_move_group)
76777684
tsk->sched_class->task_move_group(tsk, on_rq);

kernel/sched/sched.h

Lines changed: 10 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -538,22 +538,19 @@ extern int group_balance_cpu(struct sched_group *sg);
538538
/*
539539
* Return the group to which this tasks belongs.
540540
*
541-
* We use task_subsys_state_check() and extend the RCU verification with
542-
* pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
543-
* task it moves into the cgroup. Therefore by holding either of those locks,
544-
* we pin the task to the current cgroup.
541+
* We cannot use task_subsys_state() and friends because the cgroup
542+
* subsystem changes that value before the cgroup_subsys::attach() method
543+
* is called, therefore we cannot pin it and might observe the wrong value.
544+
*
545+
* The same is true for autogroup's p->signal->autogroup->tg, the autogroup
546+
* core changes this before calling sched_move_task().
547+
*
548+
* Instead we use a 'copy' which is updated from sched_move_task() while
549+
* holding both task_struct::pi_lock and rq::lock.
545550
*/
546551
static inline struct task_group *task_group(struct task_struct *p)
547552
{
548-
struct task_group *tg;
549-
struct cgroup_subsys_state *css;
550-
551-
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
552-
lockdep_is_held(&p->pi_lock) ||
553-
lockdep_is_held(&task_rq(p)->lock));
554-
tg = container_of(css, struct task_group, css);
555-
556-
return autogroup_task_group(p, tg);
553+
return p->sched_task_group;
557554
}
558555

559556
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */

0 commit comments

Comments (0)