| author | Peter Zijlstra <peterz@infradead.org> | 2012-06-22 13:36:05 +0200 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-10-02 09:47:42 -0700 |
| commit | 64ac72f81b1b41819dab596d1524bd5cae4813fd | |
| tree | 437e934421cb1aa4380318cfc4a584e913c436ac /include/linux | |
| parent | cf0a716684d6743275fdc45c6a43317272fba142 | |
sched: Fix race in task_group()
commit 8323f26ce3425460769605a6aece7a174edaa7d1 upstream.
Stefan reported a crash on a kernel before a3e5d1091c1 ("sched:
Don't call task_group() too many times in set_task_rq()"); he
found the reason to be that the multiple task_group()
invocations in set_task_rq() returned different values.

Looking at all that, I found a lack of serialization and plain
wrong comments.

The below tries to fix it using an extra pointer which is
updated under the appropriate scheduler locks. It's not pretty,
but I can't really see another way given how all the cgroup
stuff works.
Reported-and-tested-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340364965.18025.71.camel@twins
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
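For context on the race the message describes: before a3e5d1091c1, set_task_rq() resolved task_group() several times, and a concurrent cgroup move could make those lookups return different groups. The following is a minimal sketch of that pattern, assuming the per-CPU cfs_rq[]/rt_rq[] layout of struct task_group used by kernels of this era; it is simplified, not the exact kernel/sched.c code:

/*
 * Sketch only: the racy pattern and the first-step fix in set_task_rq().
 * Simplified from kernel/sched.c of this era.
 */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	/*
	 * Racy variant: writing
	 *	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	 *	p->se.parent = task_group(p)->se[cpu];
	 * resolves the cgroup twice, so a concurrent move between cgroups
	 * can leave se.cfs_rq and se.parent pointing into different groups.
	 *
	 * Looking the group up once avoids the inconsistent pair, but the
	 * lookup itself still races with cgroup migration - which is what
	 * the cached task_struct::sched_task_group pointer added below fixes.
	 */
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}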
Diffstat (limited to 'include/linux')
 include/linux/init_task.h | 12 +++++++++++-
 include/linux/sched.h     |  5 ++++-
 2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 5e41a8e..921336f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -124,8 +124,17 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
+extern struct task_group root_task_group;
+
+#ifdef CONFIG_CGROUP_SCHED
+# define INIT_CGROUP_SCHED(tsk)					\
+	.sched_task_group = &root_task_group,
+#else
+# define INIT_CGROUP_SCHED(tsk)
+#endif
+
 #ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk)					\
+# define INIT_PERF_EVENTS(tsk)					\
 	.perf_event_mutex = 						\
 		__MUTEX_INITIALIZER(tsk.perf_event_mutex),		\
 	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
@@ -160,6 +169,7 @@ extern struct cred init_cred;
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	INIT_PUSHABLE_TASKS(tsk)					\
+	INIT_CGROUP_SCHED(tsk)						\
 	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
 	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),		\
 	.real_parent	= &tsk,						\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 443ec43..0dae42e7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1235,6 +1235,9 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+	struct task_group *sched_task_group;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -2613,7 +2616,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
 				struct task_struct *tsk);
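The scheduler-side users of the new field live in kernel/sched.c and are outside this diffstat (which is limited to include/linux). The intent, roughly, is that task_group() returns the cached copy, and the copy is only rewritten while holding both task_struct::pi_lock and the runqueue lock, so every task_group() call made under those locks observes one consistent value. A hedged sketch of that usage follows; the update helper is simplified (the real update happens in sched_move_task() and the cgroup attach path and also has to handle autogroup):

/*
 * Sketch only: how the cached task_struct::sched_task_group copy is
 * meant to be used on the scheduler side (kernel/sched.c); not part of
 * the include/linux diff above.
 */

/* Readers never re-walk the cgroup hierarchy; they just use the copy. */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/*
 * The copy is refreshed only with ->pi_lock and rq->lock held (which is
 * what task_rq_lock() takes), e.g. when cgroup migration moves the task.
 * Simplified from sched_move_task(); autogroup handling and the actual
 * dequeue/enqueue of the task are omitted.
 */
void sched_move_task(struct task_struct *tsk)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(tsk, &flags);

	/* Resolve the cgroup once and publish it in the cached copy. */
	tsk->sched_task_group =
		container_of(task_subsys_state(tsk, cpu_cgroup_subsys_id),
			     struct task_group, css);

	/* ... dequeue from the old group and enqueue on the new one ... */

	task_rq_unlock(rq, tsk, &flags);
}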