-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a87dc5d..e151adc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -85,6 +85,7 @@ struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_iowait;
 	cputime64_t prev_cpu_wall;
+	unsigned int prev_cpu_wall_delta;
 	cputime64_t prev_cpu_nice;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
@@ -608,6 +609,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		unsigned int idle_time, wall_time, iowait_time;
 		unsigned int load, load_freq;
 		int freq_avg;
+		bool deep_sleep_detected = false;
+		/* the evil magic numbers; at least there are only two */
+		const unsigned int deep_sleep_backoff = 10;
+		const unsigned int deep_sleep_factor = 5;

 		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
@@ -618,6 +623,32 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
 				j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
+		/*
+		 * Ignore wall delta jitters in both directions. An
+		 * exceptionally long wall_time was likely mostly idle,
+		 * but the CPU was woken up to do work, so the next
+		 * slice is less likely to want to run at a low
+		 * frequency. Evaluate the next slice instead of the
+		 * long idle one that has already passed and is too
+		 * late to slow down for. Conversely, an exceptionally
+		 * short slice that just ran at a low frequency is
+		 * unlikely to have been idle, but we may go back to
+		 * idle soon, and that busy slice has already passed.
+		 * If short slices keep coming after a series of long
+		 * slices, the exponential backoff converges faster
+		 * and we react faster to high load; conversely, we
+		 * decay more slowly towards low load and long idle
+		 * times.
+		 */
+		if (j_dbs_info->prev_cpu_wall_delta >
+		    wall_time * deep_sleep_factor ||
+		    j_dbs_info->prev_cpu_wall_delta * deep_sleep_factor <
+		    wall_time)
+			deep_sleep_detected = true;
+		j_dbs_info->prev_cpu_wall_delta =
+			(j_dbs_info->prev_cpu_wall_delta * deep_sleep_backoff
+			 + wall_time) / (deep_sleep_backoff + 1);
+
 		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
 				j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
@@ -643,6 +674,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}

+		if (deep_sleep_detected)
+			continue;
+
 		/*
 		 * For the purpose of ondemand, waiting for disk IO is an
 		 * indication that you're performance critical, and not that
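
For reference, below is a minimal userspace sketch of the jitter filter the hunks above add, runnable outside the kernel. The constants, the two-sided threshold test, and the moving-average update mirror the patch; the jitter_state struct, the sample deltas, and the main() harness are illustrative assumptions, not part of the kernel code.

/* Standalone sketch of the deep-sleep jitter filter (illustrative). */
#include <stdbool.h>
#include <stdio.h>

#define DEEP_SLEEP_BACKOFF	10	/* weight of history in the average */
#define DEEP_SLEEP_FACTOR	5	/* jitter threshold, both directions */

struct jitter_state {
	unsigned int prev_wall_delta;	/* smoothed wall delta, usecs */
};

/*
 * Return true when wall_time deviates from the smoothed history by more
 * than DEEP_SLEEP_FACTOR in either direction, i.e. the slice is too
 * distorted by a deep sleep (or the wakeup from one) to be a useful
 * load sample.  The new delta is always folded into the moving average
 * so the filter adapts to the new rhythm.
 */
static bool deep_sleep_detected(struct jitter_state *s, unsigned int wall_time)
{
	bool jitter = s->prev_wall_delta > wall_time * DEEP_SLEEP_FACTOR ||
		      s->prev_wall_delta * DEEP_SLEEP_FACTOR < wall_time;

	s->prev_wall_delta = (s->prev_wall_delta * DEEP_SLEEP_BACKOFF +
			      wall_time) / (DEEP_SLEEP_BACKOFF + 1);
	return jitter;
}

int main(void)
{
	/* hypothetical deltas: steady 10ms ticks with one 200ms sleep */
	unsigned int samples[] = { 10000, 10000, 10000, 200000, 10000 };
	struct jitter_state s = { .prev_wall_delta = 10000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("delta %6u -> %s\n", samples[i],
		       deep_sleep_detected(&s, samples[i]) ?
				"skip (jitter)" : "evaluate");
	return 0;
}

With a backoff of 10 the average weights history 10:1 over the newest slice, so the single 200000 usec sleep above is flagged and skipped while only nudging the smoothed delta from 10000 to about 27000 usecs, and the next 10000 usec tick is evaluated normally.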