aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authormarkcs <mcampbellsmith@gmail.com>2014-04-28 21:10:05 +1000
committermarkcs <mcampbellsmith@gmail.com>2014-04-28 21:13:34 +1000
commiteba4e205b4e3d82ab8dd6bb6855d51d4f214e153 (patch)
tree3abbe4e34435df7ded748aa13b238f64852ab86a /drivers
parentc538f742a9e67eca1ccdc801e8173a7e4ce30644 (diff)
downloadkernel_samsung_smdk4412-eba4e205b4e3d82ab8dd6bb6855d51d4f214e153.zip
kernel_samsung_smdk4412-eba4e205b4e3d82ab8dd6bb6855d51d4f214e153.tar.gz
kernel_samsung_smdk4412-eba4e205b4e3d82ab8dd6bb6855d51d4f214e153.tar.bz2
smdk4412: update sound soc and codecs
Includes updated kernel source from i9305. Change-Id: I91ae18b30d02de037701250c46a457d035da56e1
Diffstat (limited to 'drivers')
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c34
-rw-r--r--drivers/cpufreq/cpufreq_pegasusq.c28
-rw-r--r--drivers/cpufreq/powernow-k8.c56
3 files changed, 55 insertions, 63 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e151adc..a87dc5d 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -85,7 +85,6 @@ struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
cputime64_t prev_cpu_iowait;
cputime64_t prev_cpu_wall;
- unsigned int prev_cpu_wall_delta;
cputime64_t prev_cpu_nice;
struct cpufreq_policy *cur_policy;
struct delayed_work work;
@@ -609,10 +608,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
unsigned int idle_time, wall_time, iowait_time;
unsigned int load, load_freq;
int freq_avg;
- bool deep_sleep_detected = false;
- /* the evil magic numbers, only 2 at least */
- const unsigned int deep_sleep_backoff = 10;
- const unsigned int deep_sleep_factor = 5;
j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
@@ -623,32 +618,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time;
- /*
- * Ignore wall delta jitters in both directions. An
- * exceptionally long wall_time will likely result
- * idle but it was waken up to do work so the next
- * slice is less likely to want to run at low
- * frequency. Let's evaluate the next slice instead of
- * the idle long one that passed already and it's too
- * late to reduce in frequency. As opposed an
- * exceptionally short slice that just run at low
- * frequency is unlikely to be idle, but we may go
- * back to idle pretty soon and that not idle slice
- * already passed. If short slices will keep coming
- * after a series of long slices the exponential
- * backoff will converge faster and we'll react faster
- * to high load. As opposed we'll decay slower
- * towards low load and long idle times.
- */
- if (j_dbs_info->prev_cpu_wall_delta >
- wall_time * deep_sleep_factor ||
- j_dbs_info->prev_cpu_wall_delta * deep_sleep_factor <
- wall_time)
- deep_sleep_detected = true;
- j_dbs_info->prev_cpu_wall_delta =
- (j_dbs_info->prev_cpu_wall_delta * deep_sleep_backoff
- + wall_time) / (deep_sleep_backoff+1);
-
idle_time = (unsigned int) cputime64_sub(cur_idle_time,
j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
@@ -674,9 +643,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
- if (deep_sleep_detected)
- continue;
-
/*
* For the purpose of ondemand, waiting for disk IO is an
* indication that you're performance critical, and not that
diff --git a/drivers/cpufreq/cpufreq_pegasusq.c b/drivers/cpufreq/cpufreq_pegasusq.c
index 208a991..c44af54 100644
--- a/drivers/cpufreq/cpufreq_pegasusq.c
+++ b/drivers/cpufreq/cpufreq_pegasusq.c
@@ -165,7 +165,7 @@ static unsigned int get_nr_run_avg(void)
#define DEF_START_DELAY (0)
#define UP_THRESHOLD_AT_MIN_FREQ (40)
-#define FREQ_FOR_RESPONSIVENESS (400000)
+#define FREQ_FOR_RESPONSIVENESS (500000)
#define HOTPLUG_DOWN_INDEX (0)
#define HOTPLUG_UP_INDEX (1)
@@ -306,7 +306,7 @@ static void apply_hotplug_lock(void)
lock = atomic_read(&g_hotplug_lock);
flag = lock - online;
- if (flag == 0)
+ if (lock == 0 || flag == 0)
return;
work = flag > 0 ? &dbs_info->up_work : &dbs_info->down_work;
@@ -380,6 +380,13 @@ void cpufreq_pegasusq_min_cpu_unlock(void)
lock = atomic_read(&g_hotplug_lock);
if (lock == 0)
return;
+#if defined(CONFIG_HAS_EARLYSUSPEND) && EARLYSUSPEND_HOTPLUGLOCK
+ if (dbs_tuners_ins.early_suspend >= 0) { /* if LCD is off-state */
+ atomic_set(&g_hotplug_lock, 1);
+ apply_hotplug_lock();
+ return;
+ }
+#endif
flag = lock - online;
if (flag >= 0)
return;
@@ -484,6 +491,21 @@ static ssize_t show_hotplug_lock(struct kobject *kobj,
return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock));
}
+static ssize_t show_cpucore_table(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ ssize_t count = 0;
+ int i;
+
+ for (i = CONFIG_NR_CPUS; i > 0; i--) {
+ count += sprintf(&buf[count], "%d ", i);
+ }
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+}
+
+
#define show_hotplug_param(file_name, num_core, up_down) \
static ssize_t show_##file_name##_##num_core##_##up_down \
(struct kobject *kobj, struct attribute *attr, char *buf) \
@@ -813,6 +835,7 @@ define_one_global_rw(max_cpu_lock);
define_one_global_rw(min_cpu_lock);
define_one_global_rw(hotplug_lock);
define_one_global_rw(dvfs_debug);
+define_one_global_ro(cpucore_table);
static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -846,6 +869,7 @@ static struct attribute *dbs_attributes[] = {
&hotplug_rq_3_0.attr,
&hotplug_rq_3_1.attr,
&hotplug_rq_4_0.attr,
+ &cpucore_table.attr,
NULL
};
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index f6cd315..ad683ec 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpumask.h>
+#include <linux/sched.h> /* for current / set_cpus_allowed() */
#include <linux/io.h>
#include <linux/delay.h>
@@ -1131,23 +1132,16 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
return res;
}
-struct powernowk8_target_arg {
- struct cpufreq_policy *pol;
- unsigned targfreq;
- unsigned relation;
-};
-
-static long powernowk8_target_fn(void *arg)
+/* Driver entry point to switch to the target frequency */
+static int powernowk8_target(struct cpufreq_policy *pol,
+ unsigned targfreq, unsigned relation)
{
- struct powernowk8_target_arg *pta = arg;
- struct cpufreq_policy *pol = pta->pol;
- unsigned targfreq = pta->targfreq;
- unsigned relation = pta->relation;
+ cpumask_var_t oldmask;
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid;
u32 checkvid;
unsigned int newstate;
- int ret;
+ int ret = -EIO;
if (!data)
return -EINVAL;
@@ -1155,16 +1149,29 @@ static long powernowk8_target_fn(void *arg)
checkfid = data->currfid;
checkvid = data->currvid;
+ /* only run on specific CPU from here on. */
+ /* This is poor form: use a workqueue or smp_call_function_single */
+ if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(oldmask, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
+
+ if (smp_processor_id() != pol->cpu) {
+ printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
+ goto err_out;
+ }
+
if (pending_bit_stuck()) {
printk(KERN_ERR PFX "failing targ, change pending bit set\n");
- return -EIO;
+ goto err_out;
}
pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
pol->cpu, targfreq, pol->min, pol->max, relation);
if (query_current_values_with_pending_wait(data))
- return -EIO;
+ goto err_out;
if (cpu_family != CPU_HW_PSTATE) {
pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
@@ -1182,7 +1189,7 @@ static long powernowk8_target_fn(void *arg)
if (cpufreq_frequency_table_target(pol, data->powernow_table,
targfreq, relation, &newstate))
- return -EIO;
+ goto err_out;
mutex_lock(&fidvid_mutex);
@@ -1195,8 +1202,9 @@ static long powernowk8_target_fn(void *arg)
ret = transition_frequency_fidvid(data, newstate);
if (ret) {
printk(KERN_ERR PFX "transition frequency failed\n");
+ ret = 1;
mutex_unlock(&fidvid_mutex);
- return 1;
+ goto err_out;
}
mutex_unlock(&fidvid_mutex);
@@ -1205,18 +1213,12 @@ static long powernowk8_target_fn(void *arg)
data->powernow_table[newstate].index);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
+ ret = 0;
- return 0;
-}
-
-/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
- unsigned targfreq, unsigned relation)
-{
- struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
- .relation = relation };
-
- return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+err_out:
+ set_cpus_allowed_ptr(current, oldmask);
+ free_cpumask_var(oldmask);
+ return ret;
}
/* Driver entry point to verify the policy and range of frequencies */