author     Peter Zijlstra <peterz@infradead.org>  2015-02-09 11:53:18 +0100
committer  Ben Hutchings <ben@decadent.org.uk>    2015-05-09 23:16:18 +0100
commit     59c51ae041f69c207ec77ff836be3b59f8a74472 (patch)
tree       042f856d6233407a12da21469b4bd417c9f23b80 /kernel
parent     e41f30294b426c6337aa182dc5adc4fea8d2a68a (diff)
sched/autogroup: Fix failure to set cpu.rt_runtime_us
commit 1fe89e1b6d270aa0d3452c60d38461ea589594e3 upstream.

Because task_group() uses a cache of autogroup_task_group(), whose
output depends on sched_class, switching classes can generate
problems.

In particular, when started as fair, the cache points to the
autogroup, so when switching to RT the tg_rt_schedulable() test fails
for every cpu.rt_{runtime,period}_us change because now the autogroup
has tasks and no runtime.

Furthermore, going back to the previous semantics of varying
task_group() with sched_class has the down-side that the sched_debug
output varies as well, even though the task really is in the
autogroup.

Therefore add an autogroup exception to tg_has_rt_tasks() -- such that
both (all) task_group() usages in sched/core now have one.

And remove all the remnants of the variable task_group() output.

Reported-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Stefan Bader <stefan.bader@canonical.com>
Fixes: 8323f26ce342 ("sched: Fix race in task_group()")
Link: http://lkml.kernel.org/r/20150209112237.GR5029@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
[bwh: Backported to 3.2: adjust filenames, context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
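For context only (not part of the commit message): a minimal userspace
sketch of the reported failure mode. The cgroup mount point and values
are assumptions; it presumes CONFIG_SCHED_AUTOGROUP=y,
kernel.sched_autogroup_enabled=1, root privileges, and the cpu
controller mounted at /sys/fs/cgroup/cpu.

/* repro-sketch.c: hypothetical reproducer, not part of this patch. */
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	int fd;

	/*
	 * The task starts as SCHED_OTHER (fair), so task_group() caches
	 * the autogroup; switching to an RT class keeps that cache.
	 */
	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}

	/*
	 * Before this fix, writes to cpu.rt_{runtime,period}_us failed
	 * with EBUSY: tg_rt_schedulable() saw the autogroup holding an
	 * RT task while having zero RT runtime.
	 */
	fd = open("/sys/fs/cgroup/cpu/cpu.rt_runtime_us", O_WRONLY);
	if (fd < 0) {
		perror("open cpu.rt_runtime_us");
		return 1;
	}
	if (write(fd, "950000", 6) < 0)
		perror("write cpu.rt_runtime_us"); /* EBUSY before the fix */
	close(fd);
	return 0;
}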
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c            6
-rw-r--r--  kernel/sched_autogroup.c  6
2 files changed, 7 insertions, 5 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index ea85b0d..29e5bf1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -9104,6 +9104,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
 	struct task_struct *g, *p;
 
+	/*
+	 * Autogroups do not have RT tasks; see autogroup_create().
+	 */
+	if (task_group_is_autogroup(tg))
+		return 0;
+
 	do_each_thread(g, p) {
 		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
 			return 1;
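For context: the early return above relies on task_group_is_autogroup();
below is an approximate sketch of the 3.2-era helper and of the check in
tg_rt_schedulable() that used to fail. Neither is part of this diff.

/* kernel/sched_autogroup.h (approximate, CONFIG_SCHED_AUTOGROUP=y):
 * only autogroups have ->autogroup set, so this identifies them. */
static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return !!tg->autogroup;
}

/* kernel/sched.c, inside tg_rt_schedulable() (approximate): before the
 * fix, the autogroup answered "has RT tasks" while holding zero RT
 * runtime, so this tripped for every rt_{runtime,period}_us write. */
if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
	return -EBUSY;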
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index f280df1..cb0a950 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -86,8 +86,7 @@ static inline struct autogroup *autogroup_create(void)
 	 * so we don't have to move tasks around upon policy change,
 	 * or flail around trying to allocate bandwidth on the fly.
 	 * A bandwidth exception in __sched_setscheduler() allows
-	 * the policy change to proceed. Thereafter, task_group()
-	 * returns &root_task_group, so zero bandwidth is required.
+	 * the policy change to proceed.
 	 */
 	free_rt_sched_group(tg);
 	tg->rt_se = root_task_group.rt_se;
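The surviving comment still points at the bandwidth exception in
__sched_setscheduler(); roughly (approximate 3.2-era shape, shown for
context, not part of this diff) that exception reads:

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Do not allow realtime tasks into groups that have no runtime
	 * assigned -- but tolerate autogroups, whose zero runtime is
	 * what lets the policy change proceed.
	 */
	if (rt_bandwidth_enabled() && rt_policy(policy) &&
			task_group(p)->rt_bandwidth.rt_runtime == 0 &&
			!task_group_is_autogroup(task_group(p)))
		return -EPERM;
#endif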
@@ -114,9 +113,6 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	if (tg != &root_task_group)
 		return false;
 
-	if (p->sched_class != &fair_sched_class)
-		return false;
-
 	/*
 	 * We can only assume the task group can't go away on us if
 	 * autogroup_move_group() can see us on ->thread_group list.
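With the sched_class test removed, task_wants_autogroup() answers the
same for fair and RT tasks, so the value task_group() caches stays
meaningful across class switches. For context, an approximate sketch of
the 3.2-era caller (not part of this diff):

static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);

	/* Membership no longer varies with p->sched_class. */
	if (enabled && task_wants_autogroup(p, tg))
		return p->signal->autogroup->tg;

	return tg;
}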