author     Nick Piggin <nickpiggin@yahoo.com.au>        2005-06-25 14:57:15 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 16:24:41 -0700
commit     a3f21bce1fefdf92a4d1705e888d390b10f3ac6f (patch)
tree       1b77f5f5a8516737e3f1f62290c08fe093cff661 /kernel
parent     7897986bad8f6cd50d6149345aca7f6480f49464 (diff)
[PATCH] sched: tweak affine wakeups
Do fewer affine wakeups. We're trying to reduce dbt2-pgsql idle time regressions here... make sure we don't move tasks the wrong way in an imbalance condition.

Also, remove the cache coldness requirement from the calculation - it seems to induce sharp cutoff points where behaviour will suddenly change on some workloads if the load creeps slightly over or under some point. Cache coldness is still useful for periodic balancing, because in that case we otherwise have no other context to determine which task to move.

Also make a minor tweak to "wake balancing" - the imbalance tolerance is now set at half the domain's imbalance, so we get the opportunity to do wake balancing before the more random periodic rebalancing gets performed.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
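The halved tolerance comes straight from the new imbalance calculation in the patch below. As a minimal standalone sketch (the imbalance_pct value of 125 is only an illustrative, typical domain default, not something set by this patch):

	/* Allow half of the headroom above 100% before wake balancing
	 * is attempted, instead of the domain's full imbalance_pct. */
	unsigned int wake_imbalance(unsigned int imbalance_pct)
	{
		return 100 + (imbalance_pct - 100) / 2;
	}

	/* e.g. imbalance_pct = 125  ->  wake_imbalance() = 112, so the
	 * "imbalance*this_load <= 100*load" test now trips at roughly a
	 * 12% imbalance rather than the full 25%. */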
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 57
1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b597b07..5ae3568 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1016,38 +1016,45 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
int idx = this_sd->wake_idx;
unsigned int imbalance;
+ imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
load = source_load(cpu, idx);
this_load = target_load(this_cpu, idx);
- /*
- * If sync wakeup then subtract the (maximum possible) effect of
- * the currently running task from the load of the current CPU:
- */
- if (sync)
- this_load -= SCHED_LOAD_SCALE;
-
- /* Don't pull the task off an idle CPU to a busy one */
- if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
- goto out_set_cpu;
-
new_cpu = this_cpu; /* Wake to this CPU if we can */
- if ((this_sd->flags & SD_WAKE_AFFINE) &&
- !task_hot(p, rq->timestamp_last_tick, this_sd)) {
- /*
- * This domain has SD_WAKE_AFFINE and p is cache cold
- * in this domain.
- */
- schedstat_inc(this_sd, ttwu_move_affine);
- goto out_set_cpu;
- } else if ((this_sd->flags & SD_WAKE_BALANCE) &&
- imbalance*this_load <= 100*load) {
+ if (this_sd->flags & SD_WAKE_AFFINE) {
+ unsigned long tl = this_load;
/*
- * This domain has SD_WAKE_BALANCE and there is
- * an imbalance.
+ * If sync wakeup then subtract the (maximum possible)
+ * effect of the currently running task from the load
+ * of the current CPU:
*/
- schedstat_inc(this_sd, ttwu_move_balance);
- goto out_set_cpu;
+ if (sync)
+ tl -= SCHED_LOAD_SCALE;
+
+ if ((tl <= load &&
+ tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) ||
+ 100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) {
+ /*
+ * This domain has SD_WAKE_AFFINE and
+ * p is cache cold in this domain, and
+ * there is no bad imbalance.
+ */
+ schedstat_inc(this_sd, ttwu_move_affine);
+ goto out_set_cpu;
+ }
+ }
+
+ /*
+ * Start passive balancing when half the imbalance_pct
+ * limit is reached.
+ */
+ if (this_sd->flags & SD_WAKE_BALANCE) {
+ if (imbalance*this_load <= 100*load) {
+ schedstat_inc(this_sd, ttwu_move_balance);
+ goto out_set_cpu;
+ }
}
}
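To read the new affine-wakeup condition outside the diff context, here is a minimal, self-contained sketch. SCHED_LOAD_SCALE of 128 is assumed for illustration, and all load values are hypothetical; only the shape of the test mirrors the patch:

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL	/* assumed scale, for illustration only */

int main(void)
{
	unsigned long imbalance_pct = 125;	/* hypothetical domain setting */
	unsigned long imbalance = 100 + (imbalance_pct - 100) / 2;
	unsigned long load = 256;	/* hypothetical load on the waking CPU */
	unsigned long tl = 64;		/* hypothetical load on this CPU, sync-adjusted */
	unsigned long target = 64;	/* hypothetical target_load(cpu, idx) */

	/* Same shape as the condition the patch adds to try_to_wake_up():
	 * wake affinely if this CPU is no busier than the waking CPU and
	 * the two CPUs' combined load fits within one task's worth, or if
	 * the imbalance is still large enough after charging this CPU for
	 * the woken task. */
	if ((tl <= load && tl + target <= SCHED_LOAD_SCALE) ||
	    100 * (tl + SCHED_LOAD_SCALE) <= imbalance * load)
		printf("affine wakeup to this CPU\n");
	else
		printf("fall through to wake balancing / periodic balancing\n");

	return 0;
}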