author	codeworkx <daniel.hillenbrand@codeworkx.de>	2012-06-02 13:09:29 +0200
committer	codeworkx <daniel.hillenbrand@codeworkx.de>	2012-06-02 13:09:29 +0200
commit	c6da2cfeb05178a11c6d062a06f8078150ee492f (patch)
tree	f3b4021d252c52d6463a9b3c1bb7245e399b009c /drivers/devfreq
parent	c6d7c4dbff353eac7919342ae6b3299a378160a6 (diff)
samsung update 1
Diffstat (limited to 'drivers/devfreq')
-rw-r--r--	drivers/devfreq/Kconfig				94
-rw-r--r--	drivers/devfreq/Makefile			9
-rw-r--r--	drivers/devfreq/devfreq.c			876
-rw-r--r--	drivers/devfreq/exynos4_bus.c			1509
-rw-r--r--	drivers/devfreq/exynos4_display.c		388
-rw-r--r--	drivers/devfreq/governor.h			24
-rw-r--r--	drivers/devfreq/governor_performance.c		32
-rw-r--r--	drivers/devfreq/governor_powersave.c		29
-rw-r--r--	drivers/devfreq/governor_simpleondemand.c	94
-rw-r--r--	drivers/devfreq/governor_userspace.c		116
10 files changed, 3171 insertions, 0 deletions
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
new file mode 100644
index 0000000..7bc77c9
--- /dev/null
+++ b/drivers/devfreq/Kconfig
@@ -0,0 +1,94 @@
+menuconfig PM_DEVFREQ
+ bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+ help
+ A device may have a list of frequencies and voltages available.
+ devfreq, a generic DVFS framework, can be registered for a device
+ so that the governor provided to devfreq can choose an
+ operating frequency based on the device driver's policy.
+
+ Each device may have its own governor and policy. devfreq can
+ reevaluate the device state periodically and/or based on
+ notifications delivered to "nb", the notifier block of devfreq.
+
+ Like some CPUs with CPUfreq, a device may have multiple clocks.
+ However, because the clock frequencies of a single device are
+ determined by that device's state, an instance of devfreq is
+ attached to a single device and returns a "representative"
+ clock frequency of the device, with a 1-to-1 mapping between
+ the device and its devfreq instance. The device registering
+ devfreq takes the responsibility to "interpret" the representative
+ frequency and to set every one of its clocks accordingly, using
+ the "target" callback given to devfreq.
+
+ When OPP is used with the devfreq device, it is recommended to
+ register devfreq's nb with the OPP's notifier head. In that case,
+ you may also use the OPP helper functions defined in devfreq.h.
+
+if PM_DEVFREQ
+
+comment "DEVFREQ Governors"
+
+config DEVFREQ_GOV_SIMPLE_ONDEMAND
+ bool "Simple Ondemand"
+ help
+ Chooses frequency based on the recent load on the device. Works
+ similarly to the ONDEMAND governor of CPUFREQ. A device using
+ Simple-Ondemand should be able to provide busy/total counter
+ values that imply the usage rate. A device may provide tuned
+ values to the governor via the data field of devfreq_add_device().
+
+config DEVFREQ_GOV_PERFORMANCE
+ bool "Performance"
+ help
+ Sets the frequency at the maximum available frequency.
+ This governor always returns UINT_MAX as frequency so that
+ the DEVFREQ framework returns the highest frequency available
+ at any time.
+
+config DEVFREQ_GOV_POWERSAVE
+ bool "Powersave"
+ help
+ Sets the frequency at the minimum available frequency.
+ This governor always returns 0 as frequency so that
+ the DEVFREQ framework returns the lowest frequency available
+ at any time.
+
+config DEVFREQ_GOV_USERSPACE
+ bool "Userspace"
+ help
+ Sets the frequency to the value specified by the user.
+ This governor returns the user-configured frequency if there
+ has been an input to /sys/devices/.../power/devfreq_set_freq.
+ Otherwise, the governor does not change the frequency
+ given at initialization.
+
+comment "DEVFREQ Drivers"
+
+config ARM_EXYNOS4_BUS_DEVFREQ
+ bool "ARM Exynos4 Memory/Bus DEVFREQ Driver (4210/4212/4412) (Experimental)"
+ depends on CPU_EXYNOS4210 || CPU_EXYNOS4412 || CPU_EXYNOS4212
+ select ARCH_HAS_OPP
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ help
+ This adds the DEVFREQ driver for Exynos4 memory bus.
+ This driver is supposed to support busses of other Exynos4 series
+ SoCs as well; however, for now, this driver supports Exynos4210,
+ Exynos4212, and Exynos4412 only. It reads PPMU counters of memory
+ controllers and adjusts the operating frequencies and voltages with
+ OPP support.
+
+config ARM_EXYNOS4_DISPLAY_DEVFREQ
+ bool "ARM Exynos4 Dynamic LCD refresh rate DEVFREQ Driver (4210/4212/4412) (Experimental)"
+ depends on CPU_EXYNOS4210 || CPU_EXYNOS4412 || CPU_EXYNOS4212
+ select ARCH_HAS_OPP
+ select DEVFREQ_GOV_POWERSAVE
+ help
+ This adds the DEVFREQ driver for the Exynos4 dynamic LCD refresh rate.
+ This driver is supposed to support displays of other Exynos4 series
+ SoCs as well; however, for now, it supports Exynos4210,
+ Exynos4212, and Exynos4412 only. It receives events (high frequency
+ or low frequency) from various devices to dynamically control the
+ LCD refresh rate.
+
+endif # PM_DEVFREQ
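
The PM_DEVFREQ help text above describes the registration model: a driver hands devfreq a device-specific profile and a governor, and the governor's chosen frequency comes back through the profile's "target" callback. The sketch below is a minimal illustration of that flow, not part of this patch; the callback bodies, the kHz values and the governor symbol devfreq_simple_ondemand (assumed to be declared in linux/devfreq.h when that governor is enabled) are assumptions.

    #include <linux/devfreq.h>
    #include <linux/err.h>
    #include <linux/opp.h>

    /* Hypothetical status callback: report busy/total counters since the last call. */
    static int my_get_dev_status(struct device *dev,
                                 struct devfreq_dev_status *stat)
    {
            stat->busy_time  = 0;   /* a real driver reads a hardware counter here */
            stat->total_time = 1;
            return 0;
    }

    /* Hypothetical target callback: apply the frequency chosen by the governor. */
    static int my_target(struct device *dev, unsigned long *freq, u32 options)
    {
            struct opp *opp = devfreq_recommended_opp(dev, freq,
                                    options & DEVFREQ_OPTION_FREQ_GLB);

            if (IS_ERR(opp))
                    return PTR_ERR(opp);
            /* program clocks/regulators for opp_get_freq(opp) here */
            return 0;
    }

    static struct devfreq_dev_profile my_profile = {
            .initial_freq   = 200000,       /* kHz, assumed */
            .polling_ms     = 100,
            .target         = my_target,
            .get_dev_status = my_get_dev_status,
    };

    /* In the driver's probe():
     *   devfreq = devfreq_add_device(dev, &my_profile,
     *                                &devfreq_simple_ondemand, NULL);
     */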
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
new file mode 100644
index 0000000..1b61517
--- /dev/null
+++ b/drivers/devfreq/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
+obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
+obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
+obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
+obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o
+obj-$(CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ) += exynos4_display.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
new file mode 100644
index 0000000..3ef0586
--- /dev/null
+++ b/drivers/devfreq/devfreq.c
@@ -0,0 +1,876 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ * for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/hrtimer.h>
+#include <linux/pm_qos_params.h>
+#include "governor.h"
+
+struct class *devfreq_class;
+
+/*
+ * devfreq_work periodically monitors every registered devfreq device.
+ * The polling interval is the minimum polling period among all polling
+ * devfreq devices, and its resolution (and minimum value) is one jiffy.
+ */
+static bool polling;
+static struct workqueue_struct *devfreq_wq;
+static struct delayed_work devfreq_work;
+
+/* The devfreq being handled by devfreq_monitor(); its removal must wait */
+static struct devfreq *wait_remove_device;
+
+/* The list of all device-devfreq */
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(devfreq_list_lock);
+
+/**
+ * find_device_devfreq() - find devfreq struct using device pointer
+ * @dev: device pointer used to lookup device devfreq.
+ *
+ * Search the list of device devfreqs and return the matched device's
+ * devfreq info. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq *find_device_devfreq(struct device *dev)
+{
+ struct devfreq *tmp_devfreq;
+
+ if (unlikely(IS_ERR_OR_NULL(dev))) {
+ pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ WARN(!mutex_is_locked(&devfreq_list_lock),
+ "devfreq_list_lock must be locked.");
+
+ list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
+ if (tmp_devfreq->dev.parent == dev)
+ return tmp_devfreq;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency.
+ * @devfreq: the devfreq instance.
+ *
+ * Note: Lock devfreq->lock before calling update_devfreq
+ * This function is exported for governors.
+ */
+int update_devfreq(struct devfreq *devfreq)
+{
+ unsigned long freq;
+ int err = 0;
+ u32 options = 0;
+
+ if (!mutex_is_locked(&devfreq->lock)) {
+ WARN(true, "devfreq->lock must be locked by the caller.\n");
+ return -EINVAL;
+ }
+
+ /* Reevaluate the proper frequency */
+ err = devfreq->governor->get_target_freq(devfreq, &freq);
+ if (err)
+ return err;
+
+ /*
+ * Adjust the frequency with user freq and QoS.
+ *
+ * List from the highest priority
+ * min_freq
+ * max_freq
+ * qos_min_freq
+ */
+
+ if (devfreq->qos_min_freq && freq < devfreq->qos_min_freq) {
+ freq = devfreq->qos_min_freq;
+ options &= ~(1 << 0);
+ options |= DEVFREQ_OPTION_FREQ_LUB;
+ }
+ if (devfreq->max_freq && freq > devfreq->max_freq) {
+ freq = devfreq->max_freq;
+ options &= ~(1 << 0);
+ options |= DEVFREQ_OPTION_FREQ_GLB;
+ }
+ if (devfreq->min_freq && freq < devfreq->min_freq) {
+ freq = devfreq->min_freq;
+ options &= ~(1 << 0);
+ options |= DEVFREQ_OPTION_FREQ_LUB;
+ }
+
+ err = devfreq->profile->target(devfreq->dev.parent, &freq, options);
+ if (err)
+ return err;
+
+ devfreq->previous_freq = freq;
+ return err;
+}
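
The comment inside update_devfreq() lists the limits from highest to lowest priority, and the code achieves that ordering by applying the clamps in reverse, so the clamp applied last (min_freq) wins. A standalone sketch with assumed kHz values makes the precedence concrete:

    /* Illustrative only; mirrors the clamp order in update_devfreq(). */
    static unsigned long apply_limits(unsigned long freq,
                                      unsigned long qos_min_freq,
                                      unsigned long max_freq,
                                      unsigned long min_freq)
    {
            if (qos_min_freq && freq < qos_min_freq)
                    freq = qos_min_freq;    /* lowest priority */
            if (max_freq && freq > max_freq)
                    freq = max_freq;
            if (min_freq && freq < min_freq)
                    freq = min_freq;        /* highest priority */
            return freq;
    }

    /*
     * Example: the governor asks for 100000, qos_min_freq = 266000,
     * max_freq = 200000, min_freq = 0:
     *   100000 -> 266000 (QoS floor) -> 200000 (max_freq) -> 200000.
     * max_freq overrides the QoS floor because it is applied later.
     */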
+
+/**
+ * devfreq_notifier_call() - Notify that the device frequency requirements
+ * have been changed outside of the devfreq framework.
+ * @nb: the notifier_block (supposed to be devfreq->nb)
+ * @val: not used
+ * @devp: not used
+ *
+ * Called by a notifier that uses devfreq->nb.
+ */
+static int devfreq_notifier_call(struct notifier_block *nb, unsigned long val,
+ void *devp)
+{
+ struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
+ int ret;
+
+ mutex_lock(&devfreq->lock);
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+
+ return ret;
+}
+
+/**
+ * devfreq_qos_notifier_call() - Handle PM QoS request updates and
+ * recompute the qos_min_freq of the devfreq device.
+ * @nb: the notifier_block (supposed to be devfreq->qos_nb)
+ * @value: the updated QoS request value
+ * @devp: not used
+ */
+static int devfreq_qos_notifier_call(struct notifier_block *nb,
+ unsigned long value, void *devp)
+{
+ struct devfreq *devfreq = container_of(nb, struct devfreq, qos_nb);
+ int ret;
+ int i;
+ unsigned long default_value = 0;
+ struct devfreq_pm_qos_table *qos_list = devfreq->profile->qos_list;
+ bool qos_use_max = devfreq->profile->qos_use_max;
+
+ if (!qos_list)
+ return NOTIFY_DONE;
+
+ mutex_lock(&devfreq->lock);
+
+ switch (devfreq->profile->qos_type) {
+ case PM_QOS_CPU_DMA_LATENCY:
+ default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ break;
+ case PM_QOS_NETWORK_LATENCY:
+ default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE;
+ break;
+ case PM_QOS_NETWORK_THROUGHPUT:
+ default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE;
+ break;
+ case PM_QOS_BUS_DMA_THROUGHPUT:
+ default_value = PM_QOS_BUS_DMA_THROUGHPUT_DEFAULT_VALUE;
+ break;
+ case PM_QOS_DISPLAY_FREQUENCY:
+ default_value = PM_QOS_DISPLAY_FREQUENCY_DEFAULT_VALUE;
+ break;
+ default:
+ /* Won't do any check to detect "default" state */
+ break;
+ }
+
+ if (value == default_value) {
+ devfreq->qos_min_freq = 0;
+ goto update;
+ }
+
+ for (i = 0; qos_list[i].freq; i++) {
+ /* QoS Met */
+ if ((qos_use_max && qos_list[i].qos_value >= value) ||
+ (!qos_use_max && qos_list[i].qos_value <= value)) {
+ devfreq->qos_min_freq = qos_list[i].freq;
+ goto update;
+ }
+ }
+
+ /* Use the highest QoS freq */
+ if (i > 0)
+ devfreq->qos_min_freq = qos_list[i - 1].freq;
+
+update:
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+
+ return ret;
+}
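
devfreq_qos_notifier_call() walks the profile's qos_list to translate a PM QoS request into qos_min_freq: with qos_use_max set, the first entry whose qos_value satisfies the request supplies the minimum frequency. The table below is a hypothetical example (the throughput values and frequencies are assumptions); it also satisfies the sanity checks performed later in devfreq_add_device(): ascending freq, a zero-freq terminator, monotonic qos_value, and qos_use_max = true for a throughput-type class.

    #include <linux/devfreq.h>
    #include <linux/pm_qos_params.h>

    /* Hypothetical mapping from requested bus throughput to a minimum device freq (kHz). */
    static struct devfreq_pm_qos_table my_qos_list[] = {
            { .freq = 133000, .qos_value = 100000 },
            { .freq = 267000, .qos_value = 200000 },
            { .freq = 400000, .qos_value = 400000 },
            { .freq = 0,      .qos_value = 0 },     /* terminator */
    };

    static struct devfreq_dev_profile my_profile = {
            /* ... initial_freq, polling_ms, target, get_dev_status ... */
            .qos_type    = PM_QOS_BUS_DMA_THROUGHPUT,
            .qos_use_max = true,    /* larger QoS value => higher frequency */
            .qos_list    = my_qos_list,
    };

With this table, a QoS request of 150000 matches the 267000 kHz entry (the first qos_value >= 150000), so qos_min_freq becomes 267000 until the request drops back to the class default.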
+
+/**
+ * _remove_devfreq() - Remove devfreq from the device.
+ * @devfreq: the devfreq struct
+ * @skip: skip calling device_unregister().
+ *
+ * Note that the caller should lock devfreq->lock before calling
+ * this. _remove_devfreq() will unlock it and free devfreq
+ * internally. devfreq_list_lock should be locked by the caller
+ * as well (not released at return)
+ *
+ * Lock usage:
+ * devfreq->lock: locked before call.
+ * unlocked at return (and freed)
+ * devfreq_list_lock: locked before call.
+ * kept locked at return.
+ * if devfreq is centrally polled.
+ *
+ * Freed memory:
+ * devfreq
+ */
+static void _remove_devfreq(struct devfreq *devfreq, bool skip)
+{
+ if (!mutex_is_locked(&devfreq->lock)) {
+ WARN(true, "devfreq->lock must be locked by the caller.\n");
+ return;
+ }
+ if (!devfreq->governor->no_central_polling &&
+ !mutex_is_locked(&devfreq_list_lock)) {
+ WARN(true, "devfreq_list_lock must be locked by the caller.\n");
+ return;
+ }
+
+ if (devfreq->being_removed)
+ return;
+
+ devfreq->being_removed = true;
+
+ if (devfreq->profile->qos_type)
+ pm_qos_remove_notifier(devfreq->profile->qos_type,
+ &devfreq->qos_nb);
+
+ if (devfreq->profile->exit)
+ devfreq->profile->exit(devfreq->dev.parent);
+
+ if (devfreq->governor->exit)
+ devfreq->governor->exit(devfreq);
+
+ if (!skip && get_device(&devfreq->dev)) {
+ device_unregister(&devfreq->dev);
+ put_device(&devfreq->dev);
+ }
+
+ if (!devfreq->governor->no_central_polling)
+ list_del(&devfreq->node);
+
+ mutex_unlock(&devfreq->lock);
+ mutex_destroy(&devfreq->lock);
+
+ kfree(devfreq);
+}
+
+/**
+ * devfreq_dev_release() - Callback for struct device to release the device.
+ * @dev: the devfreq device
+ *
+ * This calls _remove_devfreq() if it has not been called already.
+ * Note that devfreq_dev_release() could be called by _remove_devfreq() as
+ * well as by others unregistering the device.
+ */
+static void devfreq_dev_release(struct device *dev)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ bool central_polling = !devfreq->governor->no_central_polling;
+
+ /*
+ * If devfreq_dev_release() was called by device_unregister() of
+ * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
+ * being_removed is already set. This also partially checks the case
+ * where devfreq_dev_release() is called from a thread other than
+ * the one that called _remove_devfreq(); however, that case is
+ * handled completely by the second being_removed check below.
+ *
+ * Because being_removed is never
+ * unset, we do not need to worry about race conditions on
+ * being_removed.
+ */
+ if (devfreq->being_removed)
+ return;
+
+ if (central_polling)
+ mutex_lock(&devfreq_list_lock);
+
+ mutex_lock(&devfreq->lock);
+
+ /*
+ * Check being_removed flag again for the case where
+ * devfreq_dev_release() was called in a thread other than the one
+ * that possibly called _remove_devfreq().
+ */
+ if (devfreq->being_removed) {
+ mutex_unlock(&devfreq->lock);
+ goto out;
+ }
+
+ /* devfreq->lock is unlocked and devfreq is freed in _remove_devfreq() */
+ _remove_devfreq(devfreq, true);
+
+out:
+ if (central_polling)
+ mutex_unlock(&devfreq_list_lock);
+}
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work: the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+ static unsigned long last_polled_at;
+ struct devfreq *devfreq, *tmp;
+ int error;
+ unsigned long jiffies_passed;
+ unsigned long next_jiffies = ULONG_MAX, now = jiffies;
+ struct device *dev;
+
+ /* Initially last_polled_at = 0, polling every device at bootup */
+ jiffies_passed = now - last_polled_at;
+ last_polled_at = now;
+ if (jiffies_passed == 0)
+ jiffies_passed = 1;
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
+ mutex_lock(&devfreq->lock);
+ dev = devfreq->dev.parent;
+
+ /* Prevent tmp from being removed while the list lock is dropped below */
+ wait_remove_device = tmp;
+
+ if (devfreq->governor->no_central_polling ||
+ devfreq->next_polling == 0) {
+ mutex_unlock(&devfreq->lock);
+ continue;
+ }
+ mutex_unlock(&devfreq_list_lock);
+
+ /*
+ * Reduce next_polling further if devfreq_wq took an extra
+ * delay (i.e., the CPU has been idle).
+ */
+ if (devfreq->next_polling <= jiffies_passed) {
+ error = update_devfreq(devfreq);
+
+ /* Remove a devfreq with an error. */
+ if (error && error != -EAGAIN) {
+
+ dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
+ error, devfreq->governor->name);
+
+ /*
+ * Unlock devfreq before locking the list
+ * in order to avoid deadlock with
+ * find_device_devfreq or others
+ */
+ mutex_unlock(&devfreq->lock);
+ mutex_lock(&devfreq_list_lock);
+ /* Check if devfreq is already removed */
+ if (IS_ERR(find_device_devfreq(dev)))
+ continue;
+ mutex_lock(&devfreq->lock);
+ /* This unlocks devfreq->lock and frees it */
+ _remove_devfreq(devfreq, false);
+ continue;
+ }
+ devfreq->next_polling = devfreq->polling_jiffies;
+ } else {
+ devfreq->next_polling -= jiffies_passed;
+ }
+
+ if (devfreq->next_polling)
+ next_jiffies = (next_jiffies > devfreq->next_polling) ?
+ devfreq->next_polling : next_jiffies;
+
+ mutex_unlock(&devfreq->lock);
+ mutex_lock(&devfreq_list_lock);
+ }
+ wait_remove_device = NULL;
+ mutex_unlock(&devfreq_list_lock);
+
+ if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
+ polling = true;
+ queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
+ } else {
+ polling = false;
+ }
+}
+
+/**
+ * devfreq_add_device() - Add devfreq feature to the device
+ * @dev: the device to add devfreq feature.
+ * @profile: device-specific profile to run devfreq.
+ * @governor: the policy to choose frequency.
+ * @data: private data for the governor. The devfreq framework does not
+ * touch this value.
+ */
+struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const struct devfreq_governor *governor,
+ void *data)
+{
+ struct devfreq *devfreq;
+ int err = 0;
+
+ if (!dev || !profile || !governor) {
+ dev_err(dev, "%s: Invalid parameters.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+
+ if (!governor->no_central_polling) {
+ mutex_lock(&devfreq_list_lock);
+ devfreq = find_device_devfreq(dev);
+ mutex_unlock(&devfreq_list_lock);
+ if (!IS_ERR(devfreq)) {
+ dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
+ if (!devfreq) {
+ dev_err(dev, "%s: Unable to create devfreq for the device\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ mutex_init(&devfreq->lock);
+ mutex_lock(&devfreq->lock);
+ devfreq->dev.parent = dev;
+ devfreq->dev.class = devfreq_class;
+ devfreq->dev.release = devfreq_dev_release;
+ devfreq->profile = profile;
+ devfreq->governor = governor;
+ devfreq->previous_freq = profile->initial_freq;
+ devfreq->data = data;
+ devfreq->next_polling = devfreq->polling_jiffies
+ = msecs_to_jiffies(devfreq->profile->polling_ms);
+ devfreq->nb.notifier_call = devfreq_notifier_call;
+ devfreq->qos_nb.notifier_call = devfreq_qos_notifier_call;
+
+ /* Check the sanity of qos_list/qos_type */
+ if (profile->qos_type || profile->qos_list) {
+ int i;
+ bool positive_corelation = false;
+
+ if (profile->qos_type == PM_QOS_CPU_DMA_LATENCY ||
+ profile->qos_type == PM_QOS_NETWORK_LATENCY) {
+ if (profile->qos_use_max) {
+ dev_err(dev, "qos_use_max value inconsistent\n");
+ err = -EINVAL;
+ }
+ } else {
+ if (!profile->qos_use_max) {
+ dev_err(dev, "qos_use_max value inconsistent\n");
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ goto err_dev;
+
+ if (!profile->qos_type || !profile->qos_list) {
+ dev_err(dev, "QoS requirement partially omitted.\n");
+ err = -EINVAL;
+ goto err_dev;
+ }
+
+ if (!profile->qos_list[0].freq) {
+ dev_err(dev, "The first QoS requirement is the end of list.\n");
+ err = -EINVAL;
+ goto err_dev;
+ }
+
+ for (i = 1; profile->qos_list[i].freq; i++) {
+ if (profile->qos_list[i].freq <=
+ profile->qos_list[i - 1].freq) {
+ dev_err(dev, "qos_list[].freq not sorted in the ascending order. ([%d]=%lu, [%d]=%lu)\n",
+ i - 1, profile->qos_list[i - 1].freq,
+ i, profile->qos_list[i].freq);
+ err = -EINVAL;
+ goto err_dev;
+ }
+
+ if (i == 1) {
+ if (profile->qos_list[0].qos_value <
+ profile->qos_list[1].qos_value)
+ positive_corelation = true;
+ continue;
+ }
+
+ if (((profile->qos_list[i - 1].qos_value <=
+ profile->qos_list[i].qos_value) &&
+ !positive_corelation)
+ ||
+ ((profile->qos_list[i - 1].qos_value >=
+ profile->qos_list[i].qos_value) &&
+ positive_corelation)) {
+ dev_err(dev, "qos_list[].qos_value not sorted.\n");
+ err = -EINVAL;
+ goto err_dev;
+ }
+ }
+
+ pm_qos_add_notifier(profile->qos_type, &devfreq->qos_nb);
+ }
+
+ dev_set_name(&devfreq->dev, dev_name(dev));
+ err = device_register(&devfreq->dev);
+ if (err) {
+ put_device(&devfreq->dev);
+ goto err_qos_add;
+ }
+
+ if (governor->init)
+ err = governor->init(devfreq);
+ if (err)
+ goto err_init;
+
+ mutex_unlock(&devfreq->lock);
+
+ if (governor->no_central_polling)
+ goto out;
+
+ mutex_lock(&devfreq_list_lock);
+
+ list_add(&devfreq->node, &devfreq_list);
+
+ if (devfreq_wq && devfreq->next_polling && !polling) {
+ polling = true;
+ queue_delayed_work(devfreq_wq, &devfreq_work,
+ devfreq->next_polling);
+ }
+ mutex_unlock(&devfreq_list_lock);
+ goto out;
+err_init:
+ device_unregister(&devfreq->dev);
+err_qos_add:
+ if (profile->qos_type || profile->qos_list)
+ pm_qos_remove_notifier(profile->qos_type, &devfreq->qos_nb);
+err_dev:
+ mutex_unlock(&devfreq->lock);
+ kfree(devfreq);
+out:
+ if (err)
+ return ERR_PTR(err);
+ else
+ return devfreq;
+}
+
+/**
+ * devfreq_remove_device() - Remove devfreq feature from a device.
+ * @devfreq the devfreq instance to be removed
+ */
+int devfreq_remove_device(struct devfreq *devfreq)
+{
+ bool central_polling;
+
+ if (!devfreq)
+ return -EINVAL;
+
+ central_polling = !devfreq->governor->no_central_polling;
+
+ if (central_polling) {
+ mutex_lock(&devfreq_list_lock);
+ while (wait_remove_device == devfreq) {
+ mutex_unlock(&devfreq_list_lock);
+ schedule();
+ mutex_lock(&devfreq_list_lock);
+ }
+ }
+
+ mutex_lock(&devfreq->lock);
+
+ _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
+
+ if (central_polling)
+ mutex_unlock(&devfreq_list_lock);
+
+ return 0;
+}
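
A driver that registered a devfreq instance in probe() is expected to drop it again on removal, which is what devfreq_remove_device() above is for. A minimal, hypothetical pairing ('data' is assumed driver-private state and 'governor' a previously chosen devfreq governor):

    /* probe(): */
    data->devfreq = devfreq_add_device(&pdev->dev, &my_profile, governor, NULL);
    if (IS_ERR(data->devfreq))
            return PTR_ERR(data->devfreq);

    /* remove(): */
    devfreq_remove_device(data->devfreq);
    data->devfreq = NULL;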
+
+static ssize_t show_governor(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
+}
+
+static ssize_t show_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
+}
+
+static ssize_t show_polling_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
+}
+
+static ssize_t store_polling_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ unsigned int value;
+ int ret;
+
+ ret = sscanf(buf, "%u", &value);
+ if (ret != 1)
+ goto out;
+
+ mutex_lock(&df->lock);
+ df->profile->polling_ms = value;
+ df->next_polling = df->polling_jiffies
+ = msecs_to_jiffies(value);
+ mutex_unlock(&df->lock);
+
+ ret = count;
+
+ if (df->governor->no_central_polling)
+ goto out;
+
+ mutex_lock(&devfreq_list_lock);
+ if (df->next_polling > 0 && !polling) {
+ polling = true;
+ queue_delayed_work(devfreq_wq, &devfreq_work,
+ df->next_polling);
+ }
+ mutex_unlock(&devfreq_list_lock);
+out:
+ return ret;
+}
+
+static ssize_t show_central_polling(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n",
+ !to_devfreq(dev)->governor->no_central_polling);
+}
+
+static ssize_t show_qos_min_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", to_devfreq(dev)->qos_min_freq);
+}
+
+static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ unsigned long value;
+ int ret;
+ unsigned long max;
+
+ ret = sscanf(buf, "%lu", &value);
+ if (ret != 1)
+ goto out;
+
+ mutex_lock(&df->lock);
+ max = df->max_freq;
+ if (value && max && value > max) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ df->min_freq = value;
+ update_devfreq(df);
+ ret = count;
+unlock:
+ mutex_unlock(&df->lock);
+out:
+ return ret;
+}
+
+static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
+}
+
+static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ unsigned long value;
+ int ret;
+ unsigned long min;
+
+ ret = sscanf(buf, "%lu", &value);
+ if (ret != 1)
+ goto out;
+
+ mutex_lock(&df->lock);
+ min = df->min_freq;
+ if (value && min && value < min) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ df->max_freq = value;
+ update_devfreq(df);
+ ret = count;
+unlock:
+ mutex_unlock(&df->lock);
+out:
+ return ret;
+}
+
+static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
+}
+
+static struct device_attribute devfreq_attrs[] = {
+ __ATTR(governor, S_IRUGO, show_governor, NULL),
+ __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
+ __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
+ __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
+ store_polling_interval),
+ __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
+ __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
+ __ATTR(qos_min_freq, S_IRUGO, show_qos_min_freq, NULL),
+ { },
+};
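
These attributes live on the devfreq class device, so with the standard sysfs layout for class devices they should appear under /sys/class/devfreq/<name>/, where <name> is the parent device's name (dev_set_name() above copies dev_name(dev)). A small user-space sketch for tuning a device follows; the device name is a placeholder, not something defined by this patch:

    #include <stdio.h>

    /* Placeholder device name; the real name matches dev_name() of the parent. */
    #define DEVFREQ_SYSFS "/sys/class/devfreq/exynos4-busfreq"

    static int write_attr(const char *attr, const char *value)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path), DEVFREQ_SYSFS "/%s", attr);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%s\n", value);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            write_attr("polling_interval", "100");  /* ms */
            write_attr("min_freq", "133000");       /* kHz */
            write_attr("max_freq", "400000");
            return 0;
    }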
+
+/**
+ * devfreq_start_polling() - Initialize data structures for the devfreq framework
+ * and start polling registered devfreq devices.
+ */
+static int __init devfreq_start_polling(void)
+{
+ mutex_lock(&devfreq_list_lock);
+ polling = false;
+ devfreq_wq = create_freezable_workqueue("devfreq_wq");
+ INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
+ mutex_unlock(&devfreq_list_lock);
+
+ devfreq_monitor(&devfreq_work.work);
+ return 0;
+}
+late_initcall(devfreq_start_polling);
+
+static int __init devfreq_init(void)
+{
+ devfreq_class = class_create(THIS_MODULE, "devfreq");
+ if (IS_ERR(devfreq_class)) {
+ pr_err("%s: couldn't create class\n", __FILE__);
+ return PTR_ERR(devfreq_class);
+ }
+ devfreq_class->dev_attrs = devfreq_attrs;
+ return 0;
+}
+subsys_initcall(devfreq_init);
+
+static void __exit devfreq_exit(void)
+{
+ class_destroy(devfreq_class);
+}
+module_exit(devfreq_exit);
+
+/*
+ * The following are helper functions for devfreq user device drivers using
+ * the OPP framework.
+ */
+
+/**
+ * devfreq_recommended_opp() - Helper function to get proper OPP for the
+ * freq value given to target callback.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @freq: The frequency given to the target function
+ * @floor: false: find LUB first and use GLB if LUB is not available.
+ * true: find GLB first and use LUB if GLB is not available.
+ *
+ * LUB: least upper bound (at least this freq or above, but the least)
+ * GLB: greatest lower bound (at most this freq or below, but the most)
+ *
+ */
+struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
+ bool floor)
+{
+ struct opp *opp;
+
+ if (floor) {
+ opp = opp_find_freq_floor(dev, freq);
+
+ if (opp == ERR_PTR(-ENODEV))
+ opp = opp_find_freq_ceil(dev, freq);
+ } else {
+ opp = opp_find_freq_ceil(dev, freq);
+
+ if (opp == ERR_PTR(-ENODEV))
+ opp = opp_find_freq_floor(dev, freq);
+ }
+
+ return opp;
+}
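
A short illustration of the floor parameter, assuming a hypothetical device whose registered OPPs are 100000, 200000 and 400000 kHz ('dev' is the devfreq parent device):

    unsigned long freq = 150000;
    struct opp *opp;

    /* floor == false: LUB first -> picks 200000 (smallest OPP >= 150000). */
    opp = devfreq_recommended_opp(dev, &freq, false);

    /* floor == true: GLB first -> picks 100000 (largest OPP <= 150000). */
    freq = 150000;
    opp = devfreq_recommended_opp(dev, &freq, true);

Per the fallback described in the kerneldoc above, a request above the highest OPP (say 500000) with floor == false falls back to the floor search and still returns 400000; the symmetric fallback applies below the lowest OPP when floor == true.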
+
+/**
+ * devfreq_register_opp_notifier() - Helper function to get devfreq notified
+ * of any changes in OPP availability
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
+ */
+int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+ struct srcu_notifier_head *nh = opp_get_notifier(dev);
+
+ if (IS_ERR(nh))
+ return PTR_ERR(nh);
+ return srcu_notifier_chain_register(nh, &devfreq->nb);
+}
+
+/**
+ * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
+ * being notified of changes in OPP availability
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
+ *
+ * At exit() callback of devfreq_dev_profile, this must be included if
+ * devfreq_recommended_opp is used.
+ */
+int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+ struct srcu_notifier_head *nh = opp_get_notifier(dev);
+
+ if (IS_ERR(nh))
+ return PTR_ERR(nh);
+ return srcu_notifier_chain_unregister(nh, &devfreq->nb);
+}
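
Per the note above, the unregistration belongs in the profile's exit() callback; the exynos4_bus driver later in this patch does exactly that. A minimal, hypothetical version (struct my_drvdata and its devfreq field are assumed names):

    /* Hypothetical exit() callback of a devfreq_dev_profile. */
    static void my_devfreq_exit(struct device *dev)
    {
            struct my_drvdata *data = dev_get_drvdata(dev);

            devfreq_unregister_opp_notifier(dev, data->devfreq);
    }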
+
+/**
+ * In progress (prototyping)
+ */
+int devfreq_simple_ondemand_flexrate_do(struct devfreq *devfreq,
+ unsigned long interval,
+ unsigned long number)
+{
+ return 0;
+}
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("devfreq class support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644
index 0000000..2d35f22
--- /dev/null
+++ b/drivers/devfreq/exynos4_bus.c
@@ -0,0 +1,1509 @@
+/* drivers/devfreq/exynos4_bus.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
+ * This version supports EXYNOS4210 only. This changes bus frequencies
+ * and vddint voltages. Exynos4412/4212 should be able to be supported
+ * with minor modifications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/devfreq/exynos4_bus.h>
+#include <linux/pm_qos_params.h>
+
+/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
+
+#include <mach/regs-clock.h>
+#include <mach/asv.h>
+#include <mach/ppmu.h>
+
+#include <plat/map-s5p.h>
+#include <plat/cpu.h>
+
+#define MAX_SAFEVOLT 1200000 /* 1.2V */
+
+enum exynos4_busf_type {
+ TYPE_BUSF_EXYNOS4210,
+ TYPE_BUSF_EXYNOS4212,
+ TYPE_BUSF_EXYNOS4412,
+};
+
+/* Assume that the bus is saturated if the utilization is 40% (4210) or 30% (4x12) */
+#define BUS_SATURATION_RATIO 40
+#define BUS_SATURATION_RATIO_4x12 30
+
+enum ppmu_counter_ {
+ _PPMU_PMNCNT0 = 0,
+ _PPMU_PMCCNT1,
+ _PPMU_PMNCNT2,
+ _PPMU_PMNCNT3,
+ _PPMU_PMNCNT_MAX,
+};
+struct exynos4_ppmu_ {
+ void __iomem *hw_base;
+ unsigned int ccnt;
+ unsigned int event;
+ unsigned int count[_PPMU_PMNCNT_MAX];
+ bool ccnt_overflow;
+ bool count_overflow[_PPMU_PMNCNT_MAX];
+};
+
+enum busclk_level_idx {
+ LV_0 = 0,
+ LV_1,
+ LV_2,
+ LV_3,
+ LV_4,
+ _LV_END
+};
+#define EX4210_LV_MAX LV_2
+#define EX4x12_LV_MAX LV_4
+#define EX4210_LV_NUM (LV_2 + 1)
+#define EX4x12_LV_NUM (LV_4 + 1)
+
+struct busfreq_data {
+ enum exynos4_busf_type type;
+ struct device *dev;
+ struct devfreq *devfreq;
+ bool disabled;
+ struct regulator *vdd_int;
+ struct regulator *vdd_mif; /* Exynos4412/4212 only */
+ struct opp *curr_opp;
+ struct exynos4_ppmu_ dmc[2];
+
+ /* Fix bus freq during suspend/wakeup */
+ struct notifier_block pm_notifier;
+
+ /* Guarantee high freq with high cpu freq */
+ struct notifier_block cpuf_notifier;
+ struct pm_qos_request_list cpuf_enforce;
+
+ struct mutex lock;
+
+ /* Dividers calculated at boot/probe-time */
+ unsigned int dmc_divtable[_LV_END]; /* DMC0 */
+ unsigned int top_divtable[_LV_END];
+
+ /* Exynos4x12 uses DMC_PAUSE */
+ unsigned int dmc_pause_ctrl;
+};
+
+struct bus_opp_table {
+ unsigned int idx;
+ unsigned long clk;
+ unsigned long volt;
+};
+
+/* 4210 controls the clock of MIF and the voltage of INT */
+static struct bus_opp_table exynos4210_busclk_table[] = {
+ {LV_0, 400000, 1150000},
+ {LV_1, 267000, 1050000},
+ {LV_2, 133000, 1025000},
+ {0, 0, 0},
+};
+
+/*
+ * MIF is the main control knob for Exynos4x12 MIF/INT; the clock and
+ * voltage of both MIF and INT are controlled through it.
+ */
+static struct bus_opp_table exynos4x12_mifclk_table[] = {
+ {LV_0, 400000, 1100000},
+ {LV_1, 267000, 1000000},
+ {LV_2, 160000, 950000},
+ {LV_3, 133000, 950000},
+ {LV_4, 100000, 950000},
+ {0, 0, 0},
+};
+
+/*
+ * INT is not the control knob of 4x12; LV_x here is not meant to represent
+ * the current performance level. (MIF does.)
+ */
+static struct bus_opp_table exynos4x12_intclk_table[] = {
+ {LV_0, 266000, 1100000},
+ {LV_1, 200000, 1000000},
+ {LV_2, 160000, 950000},
+ {LV_3, 133000, 925000},
+ {LV_4, 100000, 900000},
+ {0, 0, 0},
+};
+
+/* TODO: should the asv volt definitions be "__initdata"? */
+/* Some chips have different operating voltages */
+static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
+ {1150000, 1050000, 1050000},
+ {1125000, 1025000, 1025000},
+ {1100000, 1000000, 1000000},
+ {1075000, 975000, 975000},
+ {1050000, 950000, 950000},
+};
+
+/* DVFS Tables of Version 20120210 */
+static unsigned int exynos4212_mif_volt[][_LV_END] = {
+ /* 400 267 160 133 100 */
+ {1012500, 962500, 912500, 912500, 912500}, /* RESERVED */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV1 */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV2 */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV3 */
+ {1050000, 1000000, 900000, 900000, 900000}, /* ASV4 */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV5 */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV6 */
+ {950000, 900000, 900000, 900000, 900000}, /* ASV7 */
+ {950000, 900000, 900000, 900000, 850000}, /* ASV8 */
+ {950000, 900000, 900000, 900000, 850000}, /* ASV9 */
+ {950000, 900000, 900000, 850000, 850000}, /* ASV10 */
+ {937500, 887500, 887500, 850000, 850000}, /* RESERVED */
+};
+
+static unsigned int exynos4212_int_volt[][_LV_END] = {
+ /* 266 200 160 133 100 */
+ {1300000, 1250000, 950000, 912500, 887500}, /* RESERVED */
+ {1062500, 1012500, 937500, 900000, 875000}, /* ASV1 */
+ {1050000, 1000000, 925000, 887500, 875000}, /* ASV2 */
+ {1050000, 1000000, 912500, 887500, 875000}, /* ASV3 */
+ {1062500, 1012500, 925000, 900000, 875000}, /* ASV4 */
+ {1050000, 1000000, 925000, 887500, 875000}, /* ASV5 */
+ {1050000, 1000000, 912500, 887500, 875000}, /* ASV6 */
+ {1037500, 987500, 912500, 875000, 875000}, /* ASV7 */
+ {1037500, 987500, 900000, 875000, 875000}, /* ASV8 */
+ {1037500, 987500, 900000, 875000, 875000}, /* ASV9 */
+ {1037500, 987500, 900000, 862500, 850000}, /* ASV10 */
+ {1035000, 975000, 887500, 850000, 850000}, /* RESERVED */
+};
+
+static unsigned int exynos4412_mif_volt[][_LV_END] = {
+ /* 400 267 160 133 100 */
+ {1100000, 1000000, 950000, 950000, 950000}, /* RESERVED */
+ {1050000, 950000, 900000, 900000, 900000}, /* RESERVED */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV3 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV4 */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV5 */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV6 */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV7 */
+ {1000000, 950000, 900000, 900000, 900000}, /* ASV8 */
+ {1000000, 950000, 900000, 900000, 850000}, /* ASV9 */
+ {1000000, 900000, 900000, 900000, 850000}, /* ASV10 */
+ {1000000, 900000, 900000, 900000, 850000}, /* RESERVED */
+};
+
+static unsigned int exynos4412_int_volt[][_LV_END] = {
+ /* GDR : 266 200 160 133 100 */
+ {1112500, 1062500, 975000, 937500, 900000}, /* RESERVED */
+ {1100000, 1050000, 962500, 925000, 887500}, /* RESERVED */
+ {1075000, 1025000, 937500, 912500, 875000}, /* ASV2 */
+ {1062500, 1012500, 937500, 900000, 862500}, /* ASV3 */
+ {1062500, 1012500, 925000, 900000, 862500}, /* ASV4 */
+ {1050000, 1000000, 925000, 887500, 850000}, /* ASV5 */
+ {1050000, 1000000, 912500, 875000, 850000}, /* ASV6 */
+ {1037500, 987500, 912500, 862500, 850000}, /* ASV7 */
+ {1037500, 987500, 900000, 862500, 850000}, /* ASV8 */
+ {1037500, 987500, 900000, 862500, 850000}, /* ASV9 */
+ {1037500, 987500, 900000, 862500, 850000}, /* ASV10 */
+ {1025000, 975000, 887500, 850000, 850000}, /* RESERVED */
+};
+
+static unsigned int exynos4x12_qos_value[][4] = {
+ {0x00, 0x00, 0x00, 0x00}, /* 400 */
+ {0x00, 0x00, 0x00, 0x00}, /* 267 */
+ {0x06, 0x03, 0x06, 0x0e}, /* 160 */
+ {0x06, 0x03, 0x06, 0x0e}, /* 133 */
+ {0x03, 0x0B, 0x00, 0x00}, /* 100 */
+};
+
+static unsigned int exynos4x12_timingrow[] = {
+ 0x34498691, 0x24488490, 0x154882D0, 0x154882D0, 0x0D488210,
+};
+
+/*** Clock Divider Data for Exynos4210 ***/
+static unsigned int exynos4210_clkdiv_dmc0[][8] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+ */
+
+ /* DMC L0: 400MHz */
+ { 3, 1, 1, 1, 1, 1, 3, 1 },
+ /* DMC L1: 266.7MHz */
+ { 4, 1, 1, 2, 1, 1, 3, 1 },
+ /* DMC L2: 133MHz */
+ { 5, 1, 1, 5, 1, 1, 3, 1 },
+};
+static unsigned int exynos4210_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+ */
+ /* ACLK200 L0: 200MHz */
+ { 3, 7, 4, 5, 1 },
+ /* ACLK200 L1: 160MHz */
+ { 4, 7, 5, 6, 1 },
+ /* ACLK200 L2: 133MHz */
+ { 5, 7, 7, 7, 1 },
+};
+static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+ /* ACLK_GDL/R L1: 200MHz */
+ { 3, 1 },
+ /* ACLK_GDL/R L2: 160MHz */
+ { 4, 1 },
+ /* ACLK_GDL/R L3: 133MHz */
+ { 5, 1 },
+};
+
+/*** Clock Divider Data for Exynos4212/4412 ***/
+static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP}
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1, 1, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 1, 1, 2, 1, 1},
+ /* DMC L2: 160MHz */
+ {5, 1, 1, 4, 1, 1},
+ /* DMC L3: 133MHz */
+ {5, 1, 1, 5, 1, 1},
+ /* DMC L4: 100MHz */
+ {7, 1, 1, 7, 1, 1},
+};
+static unsigned int exynos4x12_clkdiv_dmc1[][3] = {
+ /*
+ * Clock divider value for following
+ * { G2DACP, DIVC2C, DIVC2C_ACLK }
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 2, 1},
+ /* DMC L2: 160MHz */
+ {5, 4, 1},
+ /* DMC L3: 133MHz */
+ {5, 5, 1},
+ /* DMC L4: 100MHz */
+ {7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
+ DIVACLK133, DIVONENAND }
+ */
+
+ /* ACLK_GDL/R L0: 266MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {5, 7, 5, 7, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 7, 7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_l_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL, DIVGPL }
+ */
+
+ /* ACLK_GDL L0: 266MHz */
+ {3, 1},
+ /* ACLK_GDL L1: 200MHz */
+ {3, 1},
+ /* ACLK_GDL L2: 160MHz */
+ {4, 1},
+ /* ACLK_GDL L3: 133MHz */
+ {5, 1},
+ /* ACLK_GDL L4: 100MHz */
+ {7, 1},
+};
+static unsigned int exynos4x12_clkdiv_r_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL, DIVGPL }
+ */
+
+ /* ACLK_GDR L0: 266MHz */
+ {2, 1},
+ /* ACLK_GDR L1: 200MHz */
+ {3, 1},
+ /* ACLK_GDR L2: 160MHz */
+ {4, 1},
+ /* ACLK_GDR L3: 133MHz */
+ {5, 1},
+ /* ACLK_GDR L4: 100MHz */
+ {7, 1},
+};
+static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
+ /*
+ * Clock divider value for following
+ * { DIVMFC, DIVJPEG, DIVFIMC0~3}
+ */
+
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 160MHz */
+ {4, 4, 5},
+ /* SCLK_MFC: 133MHz */
+ {5, 5, 5},
+ /* SCLK_MFC: 100MHz */
+ {7, 7, 7},
+};
+
+
+static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4210_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ break;
+
+ if (index == EX4210_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - TOP */
+ tmp = data->top_divtable[index];
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
+
+ tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ return 0;
+}
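
Each divider change above is followed by an unbounded busy-wait on the corresponding CLKDIV_STAT register until the divider-change-in-progress bits clear. As a design note only (this is not what the driver does), the same pattern can be written with an explicit timeout so a stuck status bit cannot hang the CPU:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    /* Hypothetical bounded wait: poll 'reg' until the 'mask' bits clear or ~1 ms passes. */
    static int wait_clkdiv_stable(void __iomem *reg, u32 mask)
    {
            unsigned int tries = 1000;

            while (__raw_readl(reg) & mask) {
                    if (!--tries)
                            return -ETIMEDOUT;
                    udelay(1);
            }
            return 0;
    }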
+
+static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4x12_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ break;
+
+ if (index == EX4x12_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x111111);
+
+ /* Change Divider - DMC1 */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_DMC1);
+
+ tmp &= ~(EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK |
+ EXYNOS4_CLKDIV_DMC1_C2C_MASK |
+ EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
+ EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][1] <<
+ EXYNOS4_CLKDIV_DMC1_C2C_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][2] <<
+ EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_DMC1);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC1);
+ } while (tmp & 0x1011);
+
+ /* Change Divider - TOP */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
+
+ tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
+ EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_top[index][0] <<
+ EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
+ (exynos4x12_clkdiv_top[index][1] <<
+ EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4x12_clkdiv_top[index][2] <<
+ EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4x12_clkdiv_top[index][3] <<
+ EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4x12_clkdiv_top[index][4] <<
+ EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
+
+ tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_l_bus[index][0] <<
+ EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_l_bus[index][1] <<
+ EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_r_bus[index][0] <<
+ EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_r_bus[index][1] <<
+ EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - MFC */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_MFC);
+
+ tmp &= ~(EXYNOS4_CLKDIV_MFC_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
+ EXYNOS4_CLKDIV_MFC_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_MFC);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_MFC);
+ } while (tmp & 0x1);
+
+ /* Change Divider - JPEG */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_CAM1);
+
+ tmp &= ~(EXYNOS4_CLKDIV_CAM1_JPEG_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
+ EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_CAM1);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1);
+
+ /* Change Divider - FIMC0~3 */
+ tmp = __raw_readl(EXYNOS4_CLKDIV_CAM);
+
+ tmp &= ~(EXYNOS4_CLKDIV_CAM_FIMC0_MASK | EXYNOS4_CLKDIV_CAM_FIMC1_MASK |
+ EXYNOS4_CLKDIV_CAM_FIMC2_MASK | EXYNOS4_CLKDIV_CAM_FIMC3_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
+ EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT));
+
+ __raw_writel(tmp, EXYNOS4_CLKDIV_CAM);
+
+ do {
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1111);
+
+ if (soc_is_exynos4412() && (exynos_result_of_asv > 3)) {
+ if (index == LV_4) { /* MIF: 100, INT: 100 */
+ exynos4x12_set_abb_member(ABB_INT, ABB_MODE_100V);
+ exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_100V);
+ } else {
+ exynos4x12_set_abb_member(ABB_INT, ABB_MODE_130V);
+ exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_130V);
+ }
+ }
+
+ return 0;
+}
+
+
+static void busfreq_mon_reset(struct busfreq_data *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+
+ /* Reset PPMU */
+ __raw_writel(0x8000000f, ppmu_base + 0xf010);
+ __raw_writel(0x8000000f, ppmu_base + 0xf050);
+ __raw_writel(0x6, ppmu_base + 0xf000);
+ __raw_writel(0x0, ppmu_base + 0xf100);
+
+ /* Set PPMU Event */
+ data->dmc[i].event = 0x6;
+ __raw_writel(((data->dmc[i].event << 12) | 0x1),
+ ppmu_base + 0xfc);
+
+ /* Start PPMU */
+ __raw_writel(0x1, ppmu_base + 0xf000);
+ }
+}
+
+static void exynos4210_read_ppmu(struct busfreq_data *data)
+{
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+ u32 overflow;
+
+ /* Stop PPMU */
+ __raw_writel(0x0, ppmu_base + 0xf000);
+
+ /* Update local data from PPMU */
+ overflow = __raw_readl(ppmu_base + 0xf050);
+
+ data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
+ data->dmc[i].ccnt_overflow = overflow & (1 << 31);
+
+ for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
+ data->dmc[i].count[j] = __raw_readl(
+ ppmu_base + (0xf110 + (0x10 * j)));
+ data->dmc[i].count_overflow[j] = overflow & (1 << j);
+ }
+ }
+
+ busfreq_mon_reset(data);
+}
+
+static int exynos4x12_get_dev_status(struct busfreq_data *data,
+ struct devfreq_dev_status *stat)
+{
+ int id;
+ unsigned long long busy, total;
+
+ ppmu_update(data->dev, 3);
+
+ if (ppmu_load[PPMU_DMC0] > ppmu_load[PPMU_DMC1])
+ id = PPMU_DMC0;
+ else
+ id = PPMU_DMC1;
+
+ busy = ppmu_load_detail[0][id];
+ total = ppmu_load_detail[1][id];
+
+ while (total > (1ULL << 16)) {
+ busy >>= 8;
+ total >>= 8;
+ }
+ busy *= 100;
+ total *= BUS_SATURATION_RATIO_4x12;
+
+ stat->busy_time = busy;
+ stat->total_time = total;
+
+ ppmu_start(data->dev);
+ return 0;
+}
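
The scaling above folds the saturation assumption into the busy/total pair handed to the governor: the reported load is (busy / total) * 100 / BUS_SATURATION_RATIO_4x12, so a bus that is 30% utilized already looks fully loaded to simple-ondemand. A standalone check with assumed counter values:

    /* Illustrative numbers only. */
    unsigned long long busy  = 30;          /* busy cycles counted by the PPMU */
    unsigned long long total = 100;         /* total cycles */

    busy  *= 100;                           /* -> 3000 */
    total *= 30;                            /* BUS_SATURATION_RATIO_4x12 -> 3000 */

    /* The governor now sees busy_time / total_time == 1, i.e. 100% load. */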
+
+static int exynos4x12_get_intspec(unsigned long mifclk)
+{
+ switch (mifclk) {
+ case 400000:
+ return LV_0; /* 266000 */
+ case 267000:
+ return LV_1; /* 200000 */
+ case 160000:
+ return LV_2; /* 160000 */
+ case 133000:
+ return LV_3; /* 133000 */
+ case 100000:
+ return LV_4; /* 100000 */
+ }
+
+ return -EINVAL;
+}
+
+static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
+ struct opp *oldopp)
+{
+ int err = 0, tmp;
+ unsigned long volt = opp_get_voltage(opp);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ /* OPP represents DMC clock + INT voltage */
+ err = regulator_set_voltage(data->vdd_int, volt,
+ MAX_SAFEVOLT);
+ break;
+ case TYPE_BUSF_EXYNOS4212:
+ case TYPE_BUSF_EXYNOS4412:
+ /* OPP represents MIF clock + MIF voltage */
+ err = regulator_set_voltage(data->vdd_mif, volt,
+ MAX_SAFEVOLT);
+ if (err)
+ break;
+
+ tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ if (tmp < 0) {
+ err = tmp;
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ }
+ err = regulator_set_voltage(data->vdd_int,
+ exynos4x12_intclk_table[tmp].volt,
+ MAX_SAFEVOLT);
+ /* Try to recover */
+ if (err)
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+
+/**
+ * exynos4x12_set_qos() - Apply QoS registers (GDL/GDR)
+ * @data: the busfreq driver data
+ * @opp: the OPP being switched to
+ */
+static void exynos4x12_set_qos(struct busfreq_data *data, struct opp *opp)
+{
+ int index;
+
+ switch (opp_get_freq(opp)) {
+ case 400000:
+ index = 0;
+ break;
+ case 267000:
+ index = 1;
+ break;
+ case 160000:
+ index = 2;
+ break;
+ case 133000:
+ index = 3;
+ break;
+ case 100000:
+ index = 4;
+ break;
+ default:
+ dev_err(data->dev, "Incorrect OPP configuration.\n");
+ return;
+ }
+
+ __raw_writel(exynos4x12_qos_value[index][0], S5P_VA_GDL + 0x400);
+ __raw_writel(exynos4x12_qos_value[index][1], S5P_VA_GDL + 0x404);
+ __raw_writel(exynos4x12_qos_value[index][2], S5P_VA_GDR + 0x400);
+ __raw_writel(exynos4x12_qos_value[index][3], S5P_VA_GDR + 0x404);
+}
+
+static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
+ u32 options)
+{
+ int err = 0;
+ unsigned long def = *_freq;
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ struct opp *opp = devfreq_recommended_opp(dev, _freq, options &
+ DEVFREQ_OPTION_FREQ_GLB);
+ unsigned long freq;
+ unsigned long old_freq = opp_get_freq(data->curr_opp);
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ freq = opp_get_freq(opp);
+
+ if (old_freq == freq)
+ return 0;
+
+ dev_dbg(dev, "targeting %lukHz %luuV (%luuV)\n", freq, opp_get_voltage(opp), def);
+
+ mutex_lock(&data->lock);
+
+ if (data->disabled)
+ goto out;
+
+ if (old_freq < freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ if (old_freq != freq) {
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4212:
+ case TYPE_BUSF_EXYNOS4412:
+ exynos4x12_set_qos(data, opp);
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ goto out;
+
+ if (old_freq > freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ data->curr_opp = opp;
+out:
+ mutex_unlock(&data->lock);
+ return err;
+}
+
+static int exynos4_get_busier_dmc(struct busfreq_data *data)
+{
+ u64 p0 = data->dmc[0].count[0];
+ u64 p1 = data->dmc[1].count[0];
+
+ p0 *= data->dmc[1].ccnt;
+ p1 *= data->dmc[0].ccnt;
+
+ if (data->dmc[1].ccnt == 0)
+ return 0;
+
+ if (p0 > p1)
+ return 0;
+ return 1;
+}
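
exynos4_get_busier_dmc() compares the utilization ratios count[0]/ccnt of the two memory controllers without dividing: multiplying both sides by ccnt0 * ccnt1 turns count0/ccnt0 > count1/ccnt1 into p0 > p1. A tiny standalone check with assumed counter values:

    /* DMC0: 40 busy / 100 total cycles (40%); DMC1: 30 busy / 60 total cycles (50%). */
    unsigned long long p0 = 40ULL * 60;     /* count0 * ccnt1 = 2400 */
    unsigned long long p1 = 30ULL * 100;    /* count1 * ccnt0 = 3000 */

    /* p0 <= p1, so the function returns 1: DMC1 (50% utilized) is the busier one. */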
+
+static int exynos4_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ int busier_dmc;
+ int cycles_x2 = 2; /* 2 x cycles */
+ void __iomem *addr;
+ u32 timing;
+ u32 memctrl;
+
+ stat->current_frequency = opp_get_freq(data->curr_opp);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ exynos4210_read_ppmu(data);
+ break;
+ case TYPE_BUSF_EXYNOS4212:
+ case TYPE_BUSF_EXYNOS4412:
+ return exynos4x12_get_dev_status(data, stat);
+ default:
+ return -EINVAL;
+ }
+ busier_dmc = exynos4_get_busier_dmc(data);
+
+ if (busier_dmc)
+ addr = S5P_VA_DMC1;
+ else
+ addr = S5P_VA_DMC0;
+
+ memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
+ timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
+
+ switch ((memctrl >> 8) & 0xf) {
+ case 0x4: /* DDR2 */
+ cycles_x2 = ((timing >> 16) & 0xf) * 2;
+ break;
+ case 0x5: /* LPDDR2 */
+ case 0x6: /* DDR3 */
+ cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
+ break;
+ default:
+ pr_err("%s: Unknown Memory Type(%d).\n", __func__,
+ (memctrl >> 8) & 0xf);
+ return -EINVAL;
+ }
+
+ /* Number of cycles spent on memory access */
+ stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
+ stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+ stat->total_time = data->dmc[busier_dmc].ccnt;
+
+ pr_debug("%lu/%lu\n", stat->busy_time, stat->total_time);
+
+ /* If the counters have overflown, retry */
+ if (data->dmc[busier_dmc].ccnt_overflow ||
+ data->dmc[busier_dmc].count_overflow[0])
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void exynos4_bus_exit(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ devfreq_unregister_opp_notifier(dev, data->devfreq);
+}
+
+static struct devfreq_dev_profile exynos4_devfreq_profile = {
+ .initial_freq = 400000,
+ .polling_ms = 50,
+ .target = exynos4_bus_target,
+ .get_dev_status = exynos4_bus_get_dev_status,
+ .exit = exynos4_bus_exit,
+};
+
+static int exynos4210_init_tables(struct busfreq_data *data)
+{
+ u32 tmp;
+ int mgrp;
+ int i, err = 0;
+
+ tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
+ EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
+ EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMC_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMCP_MASK |
+ EXYNOS4_CLKDIV_DMC0_COPY2_MASK |
+ EXYNOS4_CLKDIV_DMC0_CORETI_MASK);
+
+ tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
+ EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][1] <<
+ EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][2] <<
+ EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][3] <<
+ EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][4] <<
+ EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][5] <<
+ EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][6] <<
+ EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][7] <<
+ EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+ tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK200_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
+ EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4210_clkdiv_top[i][0] <<
+ EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT) |
+ (exynos4210_clkdiv_top[i][1] <<
+ EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4210_clkdiv_top[i][2] <<
+ EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4210_clkdiv_top[i][3] <<
+ EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4210_clkdiv_top[i][4] <<
+ EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
+
+ data->top_divtable[i] = tmp;
+ }
+
+ tmp = exynos_result_of_asv;
+
+ pr_debug("ASV Group of Exynos4 is %d\n", tmp);
+ /* Use merged grouping for voltage */
+ switch (tmp) {
+ case 0:
+ mgrp = 0;
+ break;
+ case 1:
+ case 2:
+ mgrp = 1;
+ break;
+ case 3:
+ case 4:
+ mgrp = 2;
+ break;
+ case 5:
+ case 6:
+ mgrp = 3;
+ break;
+ case 7:
+ mgrp = 4;
+ break;
+ default:
+ pr_warn("Unknown ASV Group. Use max voltage.\n");
+ mgrp = 0;
+ }
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++)
+ exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+ exynos4210_busclk_table[i].volt);
+ if (err) {
+ dev_err(data->dev, "Cannot add opp entries.\n");
+ return err;
+ }
+ }
+
+
+ return 0;
+}
+
+static int exynos4x12_init_tables(struct busfreq_data *data)
+{
+ unsigned int i;
+ unsigned int tmp;
+ int ret;
+
+ /* Enable pause function for DREX2 DVFS */
+ tmp = __raw_readl(EXYNOS4_DMC_PAUSE_CTRL);
+ tmp |= DMC_PAUSE_ENABLE;
+ __raw_writel(tmp, EXYNOS4_DMC_PAUSE_CTRL);
+
+ tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
+ EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
+ EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMC_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMCP_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
+ EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][1] <<
+ EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][2] <<
+ EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][3] <<
+ EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][4] <<
+ EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][5] <<
+ EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+ tmp = exynos_result_of_asv;
+
+ if (data->type == TYPE_BUSF_EXYNOS4212) {
+ /* EXYNOS 4212 */
+ if (tmp >= ARRAY_SIZE(exynos4212_mif_volt))
+ tmp = 0;
+ pr_info("ASV Group of Exynos4212 is %d\n", tmp);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ exynos4x12_mifclk_table[i].volt =
+ exynos4212_mif_volt[tmp][i];
+ exynos4x12_intclk_table[i].volt =
+ exynos4212_int_volt[tmp][i];
+ }
+ } else {
+ /* EXYNOS 4412 */
+ if (tmp >= ARRAY_SIZE(exynos4412_mif_volt))
+ tmp = 0;
+ pr_info("ASV Group of Exynos4412 is %d\n", tmp);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ exynos4x12_mifclk_table[i].volt =
+ exynos4412_mif_volt[tmp][i];
+ exynos4x12_intclk_table[i].volt =
+ exynos4412_int_volt[tmp][i];
+ }
+ }
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+ exynos4x12_mifclk_table[i].volt);
+ if (ret) {
+ dev_err(data->dev, "Failed to add opp entries.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#define TIMINGROW_OFFSET 0x34
+static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct busfreq_data *data = container_of(this, struct busfreq_data,
+ pm_notifier);
+ struct opp *opp;
+ unsigned long maxfreq = ULONG_MAX;
+ int err = 0;
+ unsigned int timing0;
+ unsigned int index;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ /* Set Fastest and Deactivate DVFS */
+ mutex_lock(&data->lock);
+
+ data->disabled = true;
+
+ opp = opp_find_freq_floor(data->dev, &maxfreq);
+
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto unlock;
+
+ for (index = LV_0; index < EX4x12_LV_NUM; index++)
+ if (opp_get_freq(opp) ==
+ exynos4x12_mifclk_table[index].clk)
+ break;
+
+#if 0
+ if (data->type == TYPE_BUSF_EXYNOS4212 ||
+ data->type == TYPE_BUSF_EXYNOS4412) {
+ timing0 = __raw_readl(S5P_VA_DMC0 + TIMINGROW_OFFSET);
+ timing0 |= exynos4x12_timingrow[index];
+ __raw_writel(timing0, S5P_VA_DMC0 + TIMINGROW_OFFSET);
+ __raw_writel(exynos4x12_timingrow[index],
+ S5P_VA_DMC0 + TIMINGROW_OFFSET);
+ __raw_writel(timing0, S5P_VA_DMC1 + TIMINGROW_OFFSET);
+ __raw_writel(exynos4x12_timingrow[index],
+ S5P_VA_DMC1 + TIMINGROW_OFFSET);
+ }
+#endif
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4212:
+ case TYPE_BUSF_EXYNOS4412:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto unlock;
+
+ data->curr_opp = opp;
+unlock:
+ mutex_unlock(&data->lock);
+ if (err)
+ return err;
+ return NOTIFY_OK;
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ /* Reactivate */
+ mutex_lock(&data->lock);
+ data->disabled = false;
+ mutex_unlock(&data->lock);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* For Exynos4x12, if the CPU runs at >= 1 GHz, the mem/bus clock should be >= 160 MHz */
+static int exynos4x12_cpuf_notify(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct busfreq_data *data = container_of(this, struct busfreq_data,
+ cpuf_notifier);
+ struct cpufreq_freqs *freqs = ptr;
+
+ if (!pm_qos_request_active(&data->cpuf_enforce))
+ pm_qos_add_request(&data->cpuf_enforce,
+ PM_QOS_BUS_DMA_THROUGHPUT, 0);
+
+ switch (event) {
+ case CPUFREQ_PRECHANGE:
+ if (freqs->new >= 1000000)
+ pm_qos_update_request(&data->cpuf_enforce, 160000);
+ return NOTIFY_OK;
+ case CPUFREQ_POSTCHANGE:
+ if (freqs->new < 1000000)
+ pm_qos_update_request(&data->cpuf_enforce, 0);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
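The notifier above couples the CPU frequency to a memory/bus floor through the legacy pm_qos_params interface: on a PRECHANGE to 1 GHz or more it requests PM_QOS_BUS_DMA_THROUGHPUT of 160000 kHz (160 MHz), and drops the request back to 0 once the CPU falls below 1 GHz. A minimal sketch (not part of this patch) of how another driver in this tree could place a similar bus-throughput request; the function and variable names are hypothetical, and the request type is assumed to be struct pm_qos_request_list as in contemporaneous kernels:

#include <linux/pm_qos_params.h>

static struct pm_qos_request_list my_bus_qos;	/* hypothetical example request */

static void my_driver_dma_start(void)
{
	/* Ask the bus devfreq device for at least 160 MHz while DMA runs */
	if (!pm_qos_request_active(&my_bus_qos))
		pm_qos_add_request(&my_bus_qos, PM_QOS_BUS_DMA_THROUGHPUT, 160000);
	else
		pm_qos_update_request(&my_bus_qos, 160000);
}

static void my_driver_dma_stop(void)
{
	/* 0 means "no constraint"; the bus is free to scale down again */
	pm_qos_update_request(&my_bus_qos, 0);
}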
+
+static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+{
+ struct busfreq_data *data;
+ struct opp *opp;
+ struct device *dev = &pdev->dev;
+ int err = 0;
+ struct clk *ppmu_clk;
+ struct exynos4_bus_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct devfreq_pm_qos_table *qos_list;
+ int i;
+
+ data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ data->type = pdev->id_entry->driver_data;
+ data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
+ data->dev = dev;
+ data->dmc[0].hw_base = S5P_VA_DMC0;
+ data->dmc[1].hw_base = S5P_VA_DMC1;
+ mutex_init(&data->lock);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_init_tables(data);
+ break;
+ case TYPE_BUSF_EXYNOS4212:
+ case TYPE_BUSF_EXYNOS4412:
+ err = exynos4x12_init_tables(data);
+
+ /* Enable pause function for DREX2 DVFS */
+ data->dmc_pause_ctrl = __raw_readl(EXYNOS4_DMC_PAUSE_CTRL);
+ data->dmc_pause_ctrl |= DMC_PAUSE_ENABLE;
+ __raw_writel(data->dmc_pause_ctrl, EXYNOS4_DMC_PAUSE_CTRL);
+
+ ppmu_clk = clk_get(NULL, "ppmudmc0");
+ if (IS_ERR(ppmu_clk)) {
+ printk(KERN_ERR "failed to get ppmu_dmc0\n");
+ } else {
+ clk_enable(ppmu_clk);
+ clk_put(ppmu_clk);
+ }
+
+ ppmu_clk = clk_get(NULL, "ppmudmc1");
+ if (IS_ERR(ppmu_clk)) {
+ printk(KERN_ERR "failed to get ppmu_dmc1\n");
+ } else {
+ clk_enable(ppmu_clk);
+ clk_put(ppmu_clk);
+ }
+
+ ppmu_clk = clk_get(NULL, "ppmucpu");
+ if (IS_ERR(ppmu_clk)) {
+ printk(KERN_ERR "failed to get ppmu_cpu\n");
+ } else {
+ clk_enable(ppmu_clk);
+ clk_put(ppmu_clk);
+ }
+
+ ppmu_init(&exynos_ppmu[PPMU_DMC0], dev);
+ ppmu_init(&exynos_ppmu[PPMU_DMC1], dev);
+ ppmu_init(&exynos_ppmu[PPMU_CPU], dev);
+ ppmu_start(data->dev);
+ break;
+ default:
+ dev_err(dev, "Cannot determine the device id %d\n", data->type);
+ err = -EINVAL;
+ }
+ if (err)
+ goto err_regulator;
+
+ data->vdd_int = regulator_get(dev, "vdd_int");
+ if (IS_ERR(data->vdd_int)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
+ err = PTR_ERR(data->vdd_int);
+ goto err_regulator;
+ }
+ if (data->type == TYPE_BUSF_EXYNOS4212 ||
+ data->type == TYPE_BUSF_EXYNOS4412) {
+ data->vdd_mif = regulator_get(dev, "vdd_mif");
+ if (IS_ERR(data->vdd_mif)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
+ err = PTR_ERR(data->vdd_mif);
+ regulator_put(data->vdd_int);
+ goto err_regulator;
+
+ }
+ }
+
+ opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Invalid initial frequency %lu kHz.\n",
+ exynos4_devfreq_profile.initial_freq);
+ err = PTR_ERR(opp);
+ goto err_opp_add;
+ }
+ data->curr_opp = opp;
+
+ platform_set_drvdata(pdev, data);
+
+ busfreq_mon_reset(data);
+
+ if (pdata && pdata->polling_ms)
+ exynos4_devfreq_profile.polling_ms = pdata->polling_ms;
+ else
+ exynos4_devfreq_profile.polling_ms = 50;
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ qos_list = kzalloc(sizeof(struct devfreq_pm_qos_table) *
+ (EX4210_LV_NUM + 1), GFP_KERNEL);
+ for (i = 0; i < EX4210_LV_NUM; i++) {
+ qos_list[EX4210_LV_MAX - i].freq =
+ exynos4210_busclk_table[i].clk;
+ qos_list[EX4210_LV_MAX - i].qos_value =
+ exynos4210_busclk_table[i].clk;
+ }
+ break;
+ case TYPE_BUSF_EXYNOS4212:
+ case TYPE_BUSF_EXYNOS4412:
+ qos_list = kzalloc(sizeof(struct devfreq_pm_qos_table) *
+ (EX4x12_LV_NUM + 1), GFP_KERNEL);
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ qos_list[EX4x12_LV_MAX - i].freq =
+ exynos4x12_mifclk_table[i].clk;
+ /*
+  * clk / 1000 is added to stay compatible with the QoS
+  * values of the S.LSI hack (busfreq_opp).
+  */
+ qos_list[EX4x12_LV_MAX - i].qos_value =
+ exynos4x12_mifclk_table[i].clk +
+ (exynos4x12_mifclk_table[i].clk / 1000);
+ }
+ break;
+ default:
+ dev_err(dev, "Cannot determine the device id %d\n", data->type);
+ err = -EINVAL;
+ goto err_opp_add;
+ }
+
+ exynos4_devfreq_profile.qos_type = PM_QOS_BUS_DMA_THROUGHPUT;
+ exynos4_devfreq_profile.qos_use_max = true;
+ exynos4_devfreq_profile.qos_list = qos_list;
+
+ data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
+ &devfreq_simple_ondemand,
+ (pdata) ? (&pdata->threshold) :
+ NULL);
+
+ if (IS_ERR(data->devfreq)) {
+ err = PTR_ERR(data->devfreq);
+ goto err_profile_qos_added;
+ }
+
+ devfreq_register_opp_notifier(dev, data->devfreq);
+
+ err = register_pm_notifier(&data->pm_notifier);
+ if (err) {
+ dev_err(dev, "Failed to setup pm notifier\n");
+ goto err_devfreq_add;
+ }
+
+ if (data->type == TYPE_BUSF_EXYNOS4212 ||
+ data->type == TYPE_BUSF_EXYNOS4412) {
+ data->cpuf_notifier.notifier_call = exynos4x12_cpuf_notify;
+ err = cpufreq_register_notifier(&data->cpuf_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (err) {
+ dev_err(dev, "Failed to setup cpufreq notifier\n");
+ goto err_cpufreq_add;
+ }
+ }
+
+ return 0;
+err_cpufreq_add:
+ unregister_pm_notifier(&data->pm_notifier);
+err_devfreq_add:
+ devfreq_remove_device(data->devfreq);
+err_profile_qos_added:
+ kfree(qos_list);
+err_opp_add:
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ regulator_put(data->vdd_int);
+err_regulator:
+ kfree(data);
+ return err;
+}
+
+static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+{
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ struct clk *ppmu_clk;
+
+ if (data->type == TYPE_BUSF_EXYNOS4212 ||
+ data->type == TYPE_BUSF_EXYNOS4412) {
+ ppmu_clk = clk_get(NULL, "ppmudmc0");
+ if (IS_ERR(ppmu_clk)) {
+ printk(KERN_ERR "failed to get ppmu_dmc0\n");
+ } else {
+ clk_disable(ppmu_clk);
+ clk_put(ppmu_clk);
+ }
+
+ ppmu_clk = clk_get(NULL, "ppmudmc1");
+ if (IS_ERR(ppmu_clk)) {
+ printk(KERN_ERR "failed to get ppmu_dmc1\n");
+ } else {
+ clk_disable(ppmu_clk);
+ clk_put(ppmu_clk);
+ }
+
+ ppmu_clk = clk_get(NULL, "ppmucpu");
+ if (IS_ERR(ppmu_clk)) {
+ printk(KERN_ERR "failed to get ppmu_cpu\n");
+ } else {
+ clk_disable(ppmu_clk);
+ clk_put(ppmu_clk);
+ }
+
+ cpufreq_unregister_notifier(&data->cpuf_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ unregister_pm_notifier(&data->pm_notifier);
+ devfreq_remove_device(data->devfreq);
+ kfree(exynos4_devfreq_profile.qos_list);
+ regulator_put(data->vdd_int);
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ kfree(data);
+
+ return 0;
+}
+
+static int exynos4_busfreq_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int exynos4_busfreq_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ ppmu_reset(data->dev);
+
+ if (data->type == TYPE_BUSF_EXYNOS4212 ||
+ data->type == TYPE_BUSF_EXYNOS4412)
+ __raw_writel(data->dmc_pause_ctrl, EXYNOS4_DMC_PAUSE_CTRL);
+
+ return 0;
+}
+
+static const struct dev_pm_ops exynos4_busfreq_pm = {
+ .suspend = exynos4_busfreq_suspend,
+ .resume = exynos4_busfreq_resume,
+};
+
+static const struct platform_device_id exynos4_busfreq_id[] = {
+ { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
+ { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4412 },
+ { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4212 },
+ { },
+};
+
+static struct platform_driver exynos4_busfreq_driver = {
+ .probe = exynos4_busfreq_probe,
+ .remove = __devexit_p(exynos4_busfreq_remove),
+ .id_table = exynos4_busfreq_id,
+ .driver = {
+ .name = "exynos-busfreq",
+ .owner = THIS_MODULE,
+ .pm = &exynos4_busfreq_pm,
+ },
+};
+
+static int __init exynos4_busfreq_init(void)
+{
+ return platform_driver_register(&exynos4_busfreq_driver);
+}
+late_initcall(exynos4_busfreq_init);
+
+static void __exit exynos4_busfreq_exit(void)
+{
+ platform_driver_unregister(&exynos4_busfreq_driver);
+}
+module_exit(exynos4_busfreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("exynos-busfreq");
diff --git a/drivers/devfreq/exynos4_display.c b/drivers/devfreq/exynos4_display.c
new file mode 100644
index 0000000..c26124de
--- /dev/null
+++ b/drivers/devfreq/exynos4_display.c
@@ -0,0 +1,388 @@
+/* drivers/devfreq/exynos4_display.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ * Myungjoo Ham <myungjoo.ham@samsung.com>
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * EXYNOS4 - Dynamic LCD refresh rate support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/opp.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq/exynos4_display.h>
+#include <linux/pm_qos_params.h>
+
+#define EXYNOS4_DISPLAY_ON 1
+#define EXYNOS4_DISPLAY_OFF 0
+
+#define DEFAULT_DELAY_TIME 1000 /* ms (= 1 second) */
+
+enum exynos4_display_type {
+ TYPE_DISPLAY_EXYNOS4210,
+ TYPE_DISPLAY_EXYNOS4x12,
+};
+
+struct exynos4_display_data {
+ enum exynos4_display_type type;
+ struct devfreq *devfreq;
+ struct opp *curr_opp;
+ struct device *dev;
+
+ struct delayed_work wq_lowfreq;
+
+ struct notifier_block nb_pm;
+
+ unsigned int state;
+ struct mutex lock;
+};
+
+/* Define frequency level */
+enum display_clk_level_idx {
+ LV_0 = 0,
+ LV_1,
+ _LV_END
+};
+
+/* OPP table holding the available frequency levels */
+struct display_opp_table {
+ unsigned int idx;
+ unsigned long clk;
+ unsigned long volt;
+};
+
+static struct display_opp_table exynos4_display_clk_table[] = {
+ {LV_0, 40, 0 },
+ {LV_1, 60, 0 },
+ {0, 0, 0 },
+};
+
+/*
+ * The exynos4-display driver sends the new frequency to its display
+ * clients whenever it receives an event from a sender device.
+ * List of display client devices:
+ * - FIMD and so on
+ */
+static BLOCKING_NOTIFIER_HEAD(exynos4_display_notifier_client_list);
+
+int exynos4_display_register_client(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &exynos4_display_notifier_client_list, nb);
+}
+EXPORT_SYMBOL(exynos4_display_register_client);
+
+int exynos4_display_unregister_client(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &exynos4_display_notifier_client_list, nb);
+}
+EXPORT_SYMBOL(exynos4_display_unregister_client);
+
+static int exynos4_display_send_event_to_display(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(
+ &exynos4_display_notifier_client_list, val, v);
+}
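A display client such as a FIMD driver is expected to register a notifier block on this chain and adjust its refresh rate when one of these events arrives. A minimal sketch, assuming the EXYNOS4_DISPLAY_LV_HF/EXYNOS4_DISPLAY_LV_LF constants from linux/devfreq/exynos4_display.h that this file uses below; the client names are hypothetical:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/devfreq/exynos4_display.h>

static int my_lcd_display_notify(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	switch (event) {
	case EXYNOS4_DISPLAY_LV_HF:
		/* raise the panel refresh rate (e.g. back to 60 Hz) */
		break;
	case EXYNOS4_DISPLAY_LV_LF:
		/* lower the panel refresh rate (e.g. to 40 Hz) */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_lcd_display_nb = {
	.notifier_call = my_lcd_display_notify,
};

static int __init my_lcd_client_init(void)
{
	/* typically called from the client driver's probe() */
	return exynos4_display_register_client(&my_lcd_display_nb);
}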
+
+/*
+ * Register exynos4-display as a client of the PM notifier.
+ * - This callback is invoked on important PM state transitions.
+ */
+static int exynos4_display_pm_notifier_callback(struct notifier_block *this,
+ unsigned long event, void *_data)
+{
+ struct exynos4_display_data *data = container_of(this,
+ struct exynos4_display_data, nb_pm);
+
+ if (data->state == EXYNOS4_DISPLAY_OFF)
+ return NOTIFY_OK;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ mutex_lock(&data->lock);
+ data->state = EXYNOS4_DISPLAY_OFF;
+ mutex_unlock(&data->lock);
+
+ if (delayed_work_pending(&data->wq_lowfreq))
+ cancel_delayed_work(&data->wq_lowfreq);
+
+ return NOTIFY_OK;
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ mutex_lock(&data->lock);
+ data->state = EXYNOS4_DISPLAY_ON;
+ mutex_unlock(&data->lock);
+
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * Enable/disable exynos-display operation
+ */
+static void exynos4_display_disable(struct exynos4_display_data *data)
+{
+ struct opp *opp;
+ unsigned long freq = EXYNOS4_DISPLAY_LV_DEFAULT;
+
+ /* Cancel the delayed work that would switch the display client
+  * to the low frequency if it is still pending. */
+ if (delayed_work_pending(&data->wq_lowfreq))
+ cancel_delayed_work(&data->wq_lowfreq);
+
+ /* Set the display client to the default (high) frequency */
+ exynos4_display_send_event_to_display(freq, NULL);
+
+ mutex_lock(&data->lock);
+ data->state = EXYNOS4_DISPLAY_OFF;
+ mutex_unlock(&data->lock);
+
+ /* Find opp object with high frequency */
+ opp = opp_find_freq_floor(data->dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(data->dev,
+ "invalid initial frequency %lu kHz.\n", freq);
+ } else
+ data->curr_opp = opp;
+}
+
+static void exynos4_display_enable(struct exynos4_display_data *data)
+{
+ data->state = EXYNOS4_DISPLAY_ON;
+}
+
+/*
+ * Delayed work that switches the display client to the low-frequency
+ * state after the delay (1 second by default) has expired.
+ */
+static void exynos4_display_set_lowfreq(struct work_struct *work)
+{
+ exynos4_display_send_event_to_display(EXYNOS4_DISPLAY_LV_LF, NULL);
+}
+
+/*
+ * Send a frequency-change event to the display clients when the DEVFREQ
+ * framework updates the state of the device.
+ */
+static int exynos4_display_profile_target(struct device *dev,
+ unsigned long *_freq, u32 options)
+{
+ /* Inform display client of new frequency */
+ struct exynos4_display_data *data = dev_get_drvdata(dev);
+ struct opp *opp = devfreq_recommended_opp(dev, _freq, options &
+ DEVFREQ_OPTION_FREQ_GLB);
+ unsigned long old_freq = opp_get_freq(data->curr_opp);
+ unsigned long new_freq = opp_get_freq(opp);
+
+ /* TODO: The fb notifier is no longer used to identify the LCD on/off
+    state and no alternative exists yet, so exynos4-display changes the
+    refresh rate of the display client regardless of the LCD state until
+    a proper mechanism is implemented. */
+ if (old_freq == new_freq)
+ return 0;
+
+ opp = opp_find_freq_floor(dev, &new_freq);
+ data->curr_opp = opp;
+
+ switch (new_freq) {
+ case EXYNOS4_DISPLAY_LV_HF:
+ if (delayed_work_pending(&data->wq_lowfreq))
+ cancel_delayed_work(&data->wq_lowfreq);
+
+ exynos4_display_send_event_to_display(
+ EXYNOS4_DISPLAY_LV_HF, NULL);
+ break;
+ case EXYNOS4_DISPLAY_LV_LF:
+ schedule_delayed_work(&data->wq_lowfreq,
+ msecs_to_jiffies(DEFAULT_DELAY_TIME));
+ break;
+ }
+
+ printk(KERN_DEBUG "exynos4-display: request %luHz from \'%s\'\n",
+ new_freq, dev_name(dev));
+
+ return 0;
+}
+
+static void exynos4_display_profile_exit(struct device *dev)
+{
+ /* TODO */
+}
+
+static struct devfreq_dev_profile exynos4_display_profile = {
+ .initial_freq = EXYNOS4_DISPLAY_LV_DEFAULT,
+ .target = exynos4_display_profile_target,
+ .exit = exynos4_display_profile_exit,
+};
+
+static int exynos4_display_probe(struct platform_device *pdev)
+{
+ struct exynos4_display_data *data;
+ struct device *dev = &pdev->dev;
+ struct opp *opp;
+ int ret = 0;
+ int i;
+ struct devfreq_pm_qos_table *qos_list;
+
+ data = kzalloc(sizeof(struct exynos4_display_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+ data->dev = dev;
+ data->state = EXYNOS4_DISPLAY_ON;
+ mutex_init(&data->lock);
+
+ /* Register OPP entries */
+ for (i = 0 ; i < _LV_END ; i++) {
+ ret = opp_add(dev, exynos4_display_clk_table[i].clk,
+ exynos4_display_clk_table[i].volt);
+ if (ret) {
+ dev_err(dev, "cannot add opp entries.\n");
+ goto err_alloc_mem;
+ }
+ }
+
+ /* Find opp object with init frequency */
+ opp = opp_find_freq_floor(dev, &exynos4_display_profile.initial_freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "invalid initial frequency %lu kHz.\n",
+ exynos4_display_profile.initial_freq);
+ ret = PTR_ERR(opp);
+ goto err_alloc_mem;
+ }
+ data->curr_opp = opp;
+
+ /* Initialize QoS */
+ qos_list = kzalloc(sizeof(struct devfreq_pm_qos_table) * _LV_END,
+ GFP_KERNEL);
+ for (i = 0 ; i < _LV_END ; i++) {
+ qos_list[i].freq = exynos4_display_clk_table[i].clk;
+ qos_list[i].qos_value = exynos4_display_clk_table[i].clk;
+ }
+ exynos4_display_profile.qos_type = PM_QOS_DISPLAY_FREQUENCY;
+ exynos4_display_profile.qos_use_max = true;
+ exynos4_display_profile.qos_list = qos_list;
+
+ /* Register exynos4_display to DEVFREQ framework */
+ data->devfreq = devfreq_add_device(dev, &exynos4_display_profile,
+ &devfreq_powersave, NULL);
+ if (IS_ERR(data->devfreq)) {
+ ret = PTR_ERR(data->devfreq);
+ dev_err(dev,
+ "failed to add exynos4 lcd to DEVFREQ : %d\n", ret);
+ goto err_alloc_mem;
+ }
+ devfreq_register_opp_notifier(dev, data->devfreq);
+
+ /* Register exynos4_display as client to pm notifier */
+ memset(&data->nb_pm, 0, sizeof(data->nb_pm));
+ data->nb_pm.notifier_call = exynos4_display_pm_notifier_callback;
+ ret = register_pm_notifier(&data->nb_pm);
+ if (ret < 0) {
+ dev_err(dev, "failed to get pm notifier: %d\n", ret);
+ goto err_add_devfreq;
+ }
+
+ INIT_DELAYED_WORK(&data->wq_lowfreq, exynos4_display_set_lowfreq);
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+err_add_devfreq:
+ devfreq_remove_device(data->devfreq);
+err_alloc_mem:
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit exynos4_display_remove(struct platform_device *pdev)
+{
+ struct exynos4_display_data *data = platform_get_drvdata(pdev);
+
+ unregister_pm_notifier(&data->nb_pm);
+ exynos4_display_disable(data);
+
+ devfreq_remove_device(data->devfreq);
+
+ kfree(data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int exynos4_display_suspend(struct device *dev)
+{
+ /* TODO */
+ return 0;
+}
+
+static int exynos4_display_resume(struct device *dev)
+{
+ /* TODO */
+ return 0;
+}
+
+static const struct dev_pm_ops exynos4_display_dev_pm_ops = {
+ .suspend = exynos4_display_suspend,
+ .resume = exynos4_display_resume,
+};
+
+#define exynos4_display_DEV_PM_OPS (&exynos4_display_dev_pm_ops)
+#else
+#define exynos4_display_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct platform_device_id exynos4_display_ids[] = {
+ { "exynos4210-display", TYPE_DISPLAY_EXYNOS4210 },
+ { "exynos4212-display", TYPE_DISPLAY_EXYNOS4x12 },
+ { "exynos4412-display", TYPE_DISPLAY_EXYNOS4x12 },
+ { },
+};
+
+static struct platform_driver exynos4_display_driver = {
+ .probe = exynos4_display_probe,
+ .remove = __devexit_p(exynos4_display_remove),
+ .id_table = exynos4_display_ids,
+ .driver = {
+ .name = "exynos4-display",
+ .owner = THIS_MODULE,
+ .pm = exynos4_display_DEV_PM_OPS,
+ },
+};
+
+static int __init exynos4_display_init(void)
+{
+ return platform_driver_register(&exynos4_display_driver);
+}
+late_initcall(exynos4_display_init);
+
+static void __exit exynos4_display_exit(void)
+{
+ platform_driver_unregister(&exynos4_display_driver);
+}
+module_exit(exynos4_display_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EXYNOS4 display driver with devfreq framework");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("Myungjoo Ham <myungjoo.ham@samsung.com>");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_ALIAS("exynos-display");
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
new file mode 100644
index 0000000..ea7f13c
--- /dev/null
+++ b/drivers/devfreq/governor.h
@@ -0,0 +1,24 @@
+/*
+ * governor.h - internal header for devfreq governors.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This header is for devfreq governors in drivers/devfreq/
+ */
+
+#ifndef _GOVERNOR_H
+#define _GOVERNOR_H
+
+#include <linux/devfreq.h>
+
+#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
+
+/* Caution: devfreq->lock must be locked before calling update_devfreq */
+extern int update_devfreq(struct devfreq *devfreq);
+
+#endif /* _GOVERNOR_H */
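The locking rule above is the whole contract of this header: any code that wants a device re-evaluated outside the central polling path must take devfreq->lock around update_devfreq(), exactly as the userspace governor's store_freq() does later in this patch. A minimal sketch with a hypothetical helper:

#include <linux/devfreq.h>
#include <linux/mutex.h>
#include "governor.h"

/* Hypothetical helper: re-evaluate a device after a governor tunable changed */
static int my_governor_retune(struct devfreq *devfreq)
{
	int err;

	mutex_lock(&devfreq->lock);	/* must be held around update_devfreq() */
	err = update_devfreq(devfreq);	/* picks a new target and calls profile->target() */
	mutex_unlock(&devfreq->lock);

	return err;
}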
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
new file mode 100644
index 0000000..574a06b
--- /dev/null
+++ b/drivers/devfreq/governor_performance.c
@@ -0,0 +1,32 @@
+/*
+ * linux/drivers/devfreq/governor_performance.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/devfreq.h>
+
+static int devfreq_performance_func(struct devfreq *df,
+ unsigned long *freq)
+{
+ /*
+ * target callback should be able to get floor value as
+ * said in devfreq.h
+ */
+ if (!df->max_freq)
+ *freq = UINT_MAX;
+ else
+ *freq = df->max_freq;
+ return 0;
+}
+
+const struct devfreq_governor devfreq_performance = {
+ .name = "performance",
+ .get_target_freq = devfreq_performance_func,
+ .no_central_polling = true,
+};
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
new file mode 100644
index 0000000..d742d4a
--- /dev/null
+++ b/drivers/devfreq/governor_powersave.c
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/devfreq/governor_powersave.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/devfreq.h>
+
+static int devfreq_powersave_func(struct devfreq *df,
+ unsigned long *freq)
+{
+ /*
+ * target callback should be able to get ceiling value as
+ * said in devfreq.h
+ */
+ *freq = df->min_freq;
+ return 0;
+}
+
+const struct devfreq_governor devfreq_powersave = {
+ .name = "powersave",
+ .get_target_freq = devfreq_powersave_func,
+ .no_central_polling = true,
+};
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
new file mode 100644
index 0000000..a2e3eae
--- /dev/null
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -0,0 +1,94 @@
+/*
+ * linux/drivers/devfreq/governor_simpleondemand.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+
+/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
+#define DFSO_UPTHRESHOLD (90)
+#define DFSO_DOWNDIFFERENCTIAL (5)
+static int devfreq_simple_ondemand_func(struct devfreq *df,
+ unsigned long *freq)
+{
+ struct devfreq_dev_status stat;
+ int err = df->profile->get_dev_status(df->dev.parent, &stat);
+ unsigned long long a, b;
+ unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
+ unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
+ struct devfreq_simple_ondemand_data *data = df->data;
+ unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
+
+ if (err)
+ return err;
+
+ if (data) {
+ if (data->upthreshold)
+ dfso_upthreshold = data->upthreshold;
+ if (data->downdifferential)
+ dfso_downdifferential = data->downdifferential;
+ }
+ if (dfso_upthreshold > 100 ||
+ dfso_upthreshold < dfso_downdifferential)
+ return -EINVAL;
+
+ /* Assume MAX if it is going to be divided by zero */
+ if (stat.total_time == 0) {
+ *freq = max;
+ return 0;
+ }
+
+ /* Prevent overflow */
+ if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
+ stat.busy_time >>= 7;
+ stat.total_time >>= 7;
+ }
+
+ /* Set MAX if it's busy enough */
+ if (stat.busy_time * 100 >
+ stat.total_time * dfso_upthreshold) {
+ *freq = max;
+ return 0;
+ }
+
+ /* Set MAX if we do not know the initial frequency */
+ if (stat.current_frequency == 0) {
+ *freq = max;
+ return 0;
+ }
+
+ /* Keep the current frequency */
+ if (stat.busy_time * 100 >
+ stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
+ *freq = stat.current_frequency;
+ return 0;
+ }
+
+ /* Set the desired frequency based on the load */
+ a = stat.busy_time;
+ a *= stat.current_frequency;
+ b = div_u64(a, stat.total_time);
+ b *= 100;
+ b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
+ *freq = (unsigned long) b;
+
+ if (df->min_freq && *freq < df->min_freq)
+ *freq = df->min_freq;
+ if (df->max_freq && *freq > df->max_freq)
+ *freq = df->max_freq;
+
+ return 0;
+}
+
+const struct devfreq_governor devfreq_simple_ondemand = {
+ .name = "simple_ondemand",
+ .get_target_freq = devfreq_simple_ondemand_func,
+};
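To make the proportional step concrete, here is a worked example with assumed sample values (busy_time = 60, total_time = 100, current_frequency = 200000 kHz, default thresholds 90/5). The load is 60/100 = 60%, below both the 90% upthreshold and the 90% - 5% = 85% "keep current frequency" band, so the code falls through to the scaling calculation:

	a = 60 * 200000          = 12,000,000
	b = a / 100 (total_time) = 120,000
	b = b * 100              = 12,000,000
	b = b / (90 - 5/2)       = 12,000,000 / 88 = 136,363   (integer 5/2 = 2)

*freq becomes 136,363 kHz, roughly the frequency at which the same amount of work would produce an 88% load, i.e. inside the 85-90% band. The result is then clamped to min_freq/max_freq by the lines above and mapped to an actual OPP by the device driver's target() callback.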
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
new file mode 100644
index 0000000..4f8b563
--- /dev/null
+++ b/drivers/devfreq/governor_userspace.c
@@ -0,0 +1,116 @@
+/*
+ * linux/drivers/devfreq/governor_userspace.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include "governor.h"
+
+struct userspace_data {
+ unsigned long user_frequency;
+ bool valid;
+};
+
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+{
+ struct userspace_data *data = df->data;
+
+ if (!data->valid)
+ *freq = df->previous_freq; /* No user freq specified yet */
+ else
+ *freq = data->user_frequency;
+ return 0;
+}
+
+static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ struct userspace_data *data;
+ unsigned long wanted;
+ int err = 0;
+
+ mutex_lock(&devfreq->lock);
+ data = devfreq->data;
+
+ sscanf(buf, "%lu", &wanted);
+ data->user_frequency = wanted;
+ data->valid = true;
+ err = update_devfreq(devfreq);
+ if (err == 0)
+ err = count;
+ mutex_unlock(&devfreq->lock);
+ return err;
+}
+
+static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ struct userspace_data *data;
+ int err = 0;
+
+ mutex_lock(&devfreq->lock);
+ data = devfreq->data;
+
+ if (data->valid)
+ err = sprintf(buf, "%lu\n", data->user_frequency);
+ else
+ err = sprintf(buf, "undefined\n");
+ mutex_unlock(&devfreq->lock);
+ return err;
+}
+
+static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
+static struct attribute *dev_entries[] = {
+ &dev_attr_set_freq.attr,
+ NULL,
+};
+static struct attribute_group dev_attr_group = {
+ .name = "userspace",
+ .attrs = dev_entries,
+};
+
+static int userspace_init(struct devfreq *devfreq)
+{
+ int err = 0;
+ struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
+ GFP_KERNEL);
+
+ if (!data) {
+ err = -ENOMEM;
+ goto out;
+ }
+ data->valid = false;
+ devfreq->data = data;
+
+ err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+out:
+ return err;
+}
+
+static void userspace_exit(struct devfreq *devfreq)
+{
+ sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+ kfree(devfreq->data);
+ devfreq->data = NULL;
+}
+
+const struct devfreq_governor devfreq_userspace = {
+ .name = "userspace",
+ .get_target_freq = devfreq_userspace_func,
+ .init = userspace_init,
+ .exit = userspace_exit,
+ .no_central_polling = true,
+};