Diffstat (limited to 'arch/arm/mach-omap2/clockdomain.c')
-rw-r--r--  arch/arm/mach-omap2/clockdomain.c  362
1 file changed, 274 insertions(+), 88 deletions(-)
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 6cb6c03..ad07689 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -1,8 +1,8 @@
/*
* OMAP2/3/4 clockdomain framework functions
*
- * Copyright (C) 2008-2010 Texas Instruments, Inc.
- * Copyright (C) 2008-2010 Nokia Corporation
+ * Copyright (C) 2008-2011 Texas Instruments, Inc.
+ * Copyright (C) 2008-2011 Nokia Corporation
*
* Written by Paul Walmsley and Jouni Högander
* Added OMAP4 specific support by Abhijit Pagare <abhijitpagare@ti.com>
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
+#include <linux/string.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/limits.h>
@@ -73,9 +74,6 @@ static int _clkdm_register(struct clockdomain *clkdm)
if (!clkdm || !clkdm->name)
return -EINVAL;
- if (!omap_chip_is(clkdm->omap_chip))
- return -EINVAL;
-
pwrdm = pwrdm_lookup(clkdm->pwrdm.name);
if (!pwrdm) {
pr_err("clockdomain: %s: powerdomain %s does not exist\n",
@@ -92,6 +90,8 @@ static int _clkdm_register(struct clockdomain *clkdm)
pwrdm_add_clkdm(pwrdm, clkdm);
+ spin_lock_init(&clkdm->lock);
+
pr_debug("clockdomain: registered %s\n", clkdm->name);
return 0;
@@ -103,13 +103,10 @@ static struct clkdm_dep *_clkdm_deps_lookup(struct clockdomain *clkdm,
{
struct clkdm_dep *cd;
- if (!clkdm || !deps || !omap_chip_is(clkdm->omap_chip))
+ if (!clkdm || !deps)
return ERR_PTR(-EINVAL);
for (cd = deps; cd->clkdm_name; cd++) {
- if (!omap_chip_is(cd->omap_chip))
- continue;
-
if (!cd->clkdm && cd->clkdm_name)
cd->clkdm = _clkdm_lookup(cd->clkdm_name);
@@ -146,9 +143,6 @@ static void _autodep_lookup(struct clkdm_autodep *autodep)
if (!autodep)
return;
- if (!omap_chip_is(autodep->omap_chip))
- return;
-
clkdm = clkdm_lookup(autodep->clkdm.name);
if (!clkdm) {
pr_err("clockdomain: autodeps: clockdomain %s does not exist\n",
@@ -180,9 +174,6 @@ void _clkdm_add_autodeps(struct clockdomain *clkdm)
if (IS_ERR(autodep->clkdm.ptr))
continue;
- if (!omap_chip_is(autodep->omap_chip))
- continue;
-
pr_debug("clockdomain: adding %s sleepdep/wkdep for "
"clkdm %s\n", autodep->clkdm.ptr->name,
clkdm->name);
@@ -214,9 +205,6 @@ void _clkdm_del_autodeps(struct clockdomain *clkdm)
if (IS_ERR(autodep->clkdm.ptr))
continue;
- if (!omap_chip_is(autodep->omap_chip))
- continue;
-
pr_debug("clockdomain: removing %s sleepdep/wkdep for "
"clkdm %s\n", autodep->clkdm.ptr->name,
clkdm->name);
@@ -241,8 +229,6 @@ static void _resolve_clkdm_deps(struct clockdomain *clkdm,
struct clkdm_dep *cd;
for (cd = clkdm_deps; cd && cd->clkdm_name; cd++) {
- if (!omap_chip_is(cd->omap_chip))
- continue;
if (cd->clkdm)
continue;
cd->clkdm = _clkdm_lookup(cd->clkdm_name);
@@ -255,43 +241,113 @@ static void _resolve_clkdm_deps(struct clockdomain *clkdm,
/* Public functions */
/**
- * clkdm_init - set up the clockdomain layer
- * @clkdms: optional pointer to an array of clockdomains to register
- * @init_autodeps: optional pointer to an array of autodeps to register
- * @custom_funcs: func pointers for arch specific implementations
- *
- * Set up internal state. If a pointer to an array of clockdomains
- * @clkdms was supplied, loop through the list of clockdomains,
- * register all that are available on the current platform. Similarly,
- * if a pointer to an array of clockdomain autodependencies
- * @init_autodeps was provided, register those. No return value.
+ * clkdm_register_platform_funcs - register clockdomain implementation fns
+ * @co: func pointers for arch specific implementations
+ *
+ * Register the list of function pointers used to implement the
+ * clockdomain functions on different OMAP SoCs. Should be called
+ * before any other clkdm_register*() function. Returns -EINVAL if
+ * @co is null, -EEXIST if platform functions have already been
+ * registered, or 0 upon success.
*/
-void clkdm_init(struct clockdomain **clkdms,
- struct clkdm_autodep *init_autodeps,
- struct clkdm_ops *custom_funcs)
+int clkdm_register_platform_funcs(struct clkdm_ops *co)
+{
+ if (!co)
+ return -EINVAL;
+
+ if (arch_clkdm)
+ return -EEXIST;
+
+ arch_clkdm = co;
+
+ return 0;
+}
+
+/**
+ * clkdm_register_clkdms - register SoC clockdomains
+ * @cs: pointer to an array of struct clockdomain to register
+ *
+ * Register the clockdomains available on a particular OMAP SoC. Must
+ * be called after clkdm_register_platform_funcs(). May be called
+ * multiple times. Returns -EACCES if called before
+ * clkdm_register_platform_funcs(); -EINVAL if the argument @cs is
+ * null; or 0 upon success.
+ */
+int clkdm_register_clkdms(struct clockdomain **cs)
{
struct clockdomain **c = NULL;
- struct clockdomain *clkdm;
- struct clkdm_autodep *autodep = NULL;
- if (!custom_funcs)
- WARN(1, "No custom clkdm functions registered\n");
- else
- arch_clkdm = custom_funcs;
+ if (!arch_clkdm)
+ return -EACCES;
+
+ if (!cs)
+ return -EINVAL;
+
+ for (c = cs; *c; c++)
+ _clkdm_register(*c);
+
+ return 0;
+}
+
+/**
+ * clkdm_register_autodeps - register autodeps (if required)
+ * @ia: pointer to a static array of struct clkdm_autodep to register
+ *
+ * Register clockdomain "automatic dependencies." These are
+ * clockdomain wakeup and sleep dependencies that are automatically
+ * added whenever the first clock inside a clockdomain is enabled, and
+ * removed whenever the last clock inside a clockdomain is disabled.
+ * These are currently only used on OMAP3 devices, and are deprecated,
+ * since they waste energy. However, until the OMAP2/3 IP block
+ * enable/disable sequence can be converted to match the OMAP4
+ * sequence, they are needed.
+ *
+ * Must be called only after all of the SoC clockdomains are
+ * registered, since the function will resolve autodep clockdomain
+ * names into clockdomain pointers.
+ *
+ * The struct clkdm_autodep @ia array must be static, as this function
+ * does not copy the array elements.
+ *
+ * Returns -EACCES if called before any clockdomains have been
+ * registered, -EINVAL if called with a null @ia argument, -EEXIST if
+ * autodeps have already been registered, or 0 upon success.
+ */
+int clkdm_register_autodeps(struct clkdm_autodep *ia)
+{
+ struct clkdm_autodep *a = NULL;
+
+ if (list_empty(&clkdm_list))
+ return -EACCES;
- if (clkdms)
- for (c = clkdms; *c; c++)
- _clkdm_register(*c);
+ if (!ia)
+ return -EINVAL;
- autodeps = init_autodeps;
if (autodeps)
- for (autodep = autodeps; autodep->clkdm.ptr; autodep++)
- _autodep_lookup(autodep);
+ return -EEXIST;
+
+ autodeps = ia;
+ for (a = autodeps; a->clkdm.ptr; a++)
+ _autodep_lookup(a);
+
+ return 0;
+}
+
+/**
+ * clkdm_complete_init - set up the clockdomain layer
+ *
+ * Put all clockdomains into software-supervised mode; PM code should
+ * later enable hardware-supervised mode as appropriate. Must be
+ * called after clkdm_register_clkdms(). Returns -EACCES if called
+ * before clkdm_register_clkdms(), or 0 upon success.
+ */
+int clkdm_complete_init(void)
+{
+ struct clockdomain *clkdm;
+
+ if (list_empty(&clkdm_list))
+ return -EACCES;
- /*
- * Put all clockdomains into software-supervised mode; PM code
- * should later enable hardware-supervised mode as appropriate
- */
list_for_each_entry(clkdm, &clkdm_list, node) {
if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
clkdm_wakeup(clkdm);
@@ -304,6 +360,8 @@ void clkdm_init(struct clockdomain **clkdms,
_resolve_clkdm_deps(clkdm, clkdm->sleepdep_srcs);
clkdm_clear_all_sleepdeps(clkdm);
}
+
+ return 0;
}
/**
@@ -690,6 +748,9 @@ int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
*/
int clkdm_sleep(struct clockdomain *clkdm)
{
+ int ret;
+ unsigned long flags;
+
if (!clkdm)
return -EINVAL;
@@ -704,7 +765,11 @@ int clkdm_sleep(struct clockdomain *clkdm)
pr_debug("clockdomain: forcing sleep on %s\n", clkdm->name);
- return arch_clkdm->clkdm_sleep(clkdm);
+ spin_lock_irqsave(&clkdm->lock, flags);
+ clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
+ ret = arch_clkdm->clkdm_sleep(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+ return ret;
}
/**
@@ -718,6 +783,9 @@ int clkdm_sleep(struct clockdomain *clkdm)
*/
int clkdm_wakeup(struct clockdomain *clkdm)
{
+ int ret;
+ unsigned long flags;
+
if (!clkdm)
return -EINVAL;
@@ -732,7 +800,12 @@ int clkdm_wakeup(struct clockdomain *clkdm)
pr_debug("clockdomain: forcing wakeup on %s\n", clkdm->name);
- return arch_clkdm->clkdm_wakeup(clkdm);
+ spin_lock_irqsave(&clkdm->lock, flags);
+ clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
+ ret = arch_clkdm->clkdm_wakeup(clkdm);
+ ret |= pwrdm_state_switch(clkdm->pwrdm.ptr);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+ return ret;
}
/**
@@ -747,6 +820,8 @@ int clkdm_wakeup(struct clockdomain *clkdm)
*/
void clkdm_allow_idle(struct clockdomain *clkdm)
{
+ unsigned long flags;
+
if (!clkdm)
return;
@@ -762,8 +837,11 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
pr_debug("clockdomain: enabling automatic idle transitions for %s\n",
clkdm->name);
+ spin_lock_irqsave(&clkdm->lock, flags);
+ clkdm->_flags |= _CLKDM_FLAG_HWSUP_ENABLED;
arch_clkdm->clkdm_allow_idle(clkdm);
pwrdm_clkdm_state_switch(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
}
/**
@@ -777,6 +855,8 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
*/
void clkdm_deny_idle(struct clockdomain *clkdm)
{
+ unsigned long flags;
+
if (!clkdm)
return;
@@ -792,11 +872,91 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
pr_debug("clockdomain: disabling automatic idle transitions for %s\n",
clkdm->name);
+ spin_lock_irqsave(&clkdm->lock, flags);
+ clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
arch_clkdm->clkdm_deny_idle(clkdm);
+ pwrdm_state_switch(clkdm->pwrdm.ptr);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
}
+/**
+ * clkdm_in_hwsup - does clockdomain @clkdm have hardware-supervised idle enabled?
+ * @clkdm: struct clockdomain *
+ *
+ * Returns true if clockdomain @clkdm currently has
+ * hardware-supervised idle enabled, or false if it does not or if
+ * @clkdm is NULL. It is only valid to call this function after
+ * clkdm_init() has been called. This function does not actually read
+ * bits from the hardware; it instead tests an in-memory flag that is
+ * changed whenever the clockdomain code changes the auto-idle mode.
+ */
+bool clkdm_in_hwsup(struct clockdomain *clkdm)
+{
+ bool ret;
+ unsigned long flags;
-/* Clockdomain-to-clock framework interface code */
+ if (!clkdm)
+ return false;
+
+ spin_lock_irqsave(&clkdm->lock, flags);
+ ret = (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false;
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+
+ return ret;
+}
+
+/* Clockdomain-to-clock/hwmod framework interface code */
+
+static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
+{
+ unsigned long flags;
+
+ if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
+ return -EINVAL;
+
+ /*
+ * For architectures with no autodeps, clkdm_clk_enable
+ * should be called for every clock instance or hwmod that is
+ * enabled, so the clkdm can be force woken up.
+ */
+ if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
+ return 0;
+
+ spin_lock_irqsave(&clkdm->lock, flags);
+ arch_clkdm->clkdm_clk_enable(clkdm);
+ pwrdm_wait_transition(clkdm->pwrdm.ptr);
+ pwrdm_clkdm_state_switch(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+
+ pr_debug("clockdomain: clkdm %s: enabled\n", clkdm->name);
+
+ return 0;
+}
+
+static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
+{
+ unsigned long flags;
+
+ if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
+ return -EINVAL;
+
+ if (atomic_read(&clkdm->usecount) == 0) {
+ WARN_ON(1); /* underflow */
+ return -ERANGE;
+ }
+
+ if (atomic_dec_return(&clkdm->usecount) > 0)
+ return 0;
+
+ spin_lock_irqsave(&clkdm->lock, flags);
+ arch_clkdm->clkdm_clk_disable(clkdm);
+ pwrdm_clkdm_state_switch(clkdm);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+
+ pr_debug("clockdomain: clkdm %s: disabled\n", clkdm->name);
+
+ return 0;
+}
/**
* clkdm_clk_enable - add an enabled downstream clock to this clkdm
@@ -819,25 +979,10 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
* downstream clocks for debugging purposes?
*/
- if (!clkdm || !clk)
- return -EINVAL;
-
- if (!arch_clkdm || !arch_clkdm->clkdm_clk_enable)
+ if (!clk)
return -EINVAL;
- if (atomic_inc_return(&clkdm->usecount) > 1)
- return 0;
-
- /* Clockdomain now has one enabled downstream clock */
-
- pr_debug("clockdomain: clkdm %s: clk %s now enabled\n", clkdm->name,
- clk->name);
-
- arch_clkdm->clkdm_clk_enable(clkdm);
- pwrdm_wait_transition(clkdm->pwrdm.ptr);
- pwrdm_clkdm_state_switch(clkdm);
-
- return 0;
+ return _clkdm_clk_hwmod_enable(clkdm);
}
/**
@@ -850,9 +995,8 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
* clockdomain usecount goes to 0, put the clockdomain to sleep
* (software-supervised mode) or remove the clkdm autodependencies
* (hardware-supervised mode). Returns -EINVAL if passed null
- * pointers; -ERANGE if the @clkdm usecount underflows and debugging
- * is enabled; or returns 0 upon success or if the clockdomain is in
- * hwsup idle mode.
+ * pointers; -ERANGE if the @clkdm usecount underflows; or returns 0
+ * upon success or if the clockdomain is in hwsup idle mode.
*/
int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
{
@@ -861,30 +1005,72 @@ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
* downstream clocks for debugging purposes?
*/
- if (!clkdm || !clk)
+ if (!clk)
return -EINVAL;
- if (!arch_clkdm || !arch_clkdm->clkdm_clk_disable)
+ return _clkdm_clk_hwmod_disable(clkdm);
+}
+
+/**
+ * clkdm_hwmod_enable - add an enabled downstream hwmod to this clkdm
+ * @clkdm: struct clockdomain *
+ * @oh: struct omap_hwmod * of the enabled downstream hwmod
+ *
+ * Increment the usecount of the clockdomain @clkdm and ensure that it
+ * is awake before @oh is enabled. Intended to be called by
+ * module_enable() code.
+ * If the clockdomain is in software-supervised idle mode, force the
+ * clockdomain to wake. If the clockdomain is in hardware-supervised idle
+ * mode, add clkdm-pwrdm autodependencies, to ensure that devices in the
+ * clockdomain can be read from/written to by on-chip processors.
+ * Returns -EINVAL if passed null pointers;
+ * returns 0 upon success or if the clockdomain is in hwsup idle mode.
+ */
+int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh)
+{
+ /* The clkdm attribute does not exist yet prior to OMAP4 */
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ return 0;
+
+ /*
+ * XXX Rewrite this code to maintain a list of enabled
+ * downstream hwmods for debugging purposes?
+ */
+
+ if (!oh)
return -EINVAL;
-#ifdef DEBUG
- if (atomic_read(&clkdm->usecount) == 0) {
- WARN_ON(1); /* underflow */
- return -ERANGE;
- }
-#endif
+ return _clkdm_clk_hwmod_enable(clkdm);
+}
- if (atomic_dec_return(&clkdm->usecount) > 0)
+/**
+ * clkdm_hwmod_disable - remove an enabled downstream hwmod from this clkdm
+ * @clkdm: struct clockdomain *
+ * @oh: struct omap_hwmod * of the disabled downstream hwmod
+ *
+ * Decrement the usecount of this clockdomain @clkdm when @oh is
+ * disabled. Intended to be called by module_disable() code.
+ * If the clockdomain usecount goes to 0, put the clockdomain to sleep
+ * (software-supervised mode) or remove the clkdm autodependencies
+ * (hardware-supervised mode).
+ * Returns -EINVAL if passed null pointers; -ERANGE if the @clkdm usecount
+ * underflows; or returns 0 upon success or if the clockdomain is in hwsup
+ * idle mode.
+ */
+int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
+{
+ /* The clkdm attribute does not exist yet prior to OMAP4 */
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
return 0;
- /* All downstream clocks of this clockdomain are now disabled */
-
- pr_debug("clockdomain: clkdm %s: clk %s now disabled\n", clkdm->name,
- clk->name);
+ /*
+ * XXX Rewrite this code to maintain a list of enabled
+ * downstream hwmods for debugging purposes?
+ */
- arch_clkdm->clkdm_clk_disable(clkdm);
- pwrdm_clkdm_state_switch(clkdm);
+ if (!oh)
+ return -EINVAL;
- return 0;
+ return _clkdm_clk_hwmod_disable(clkdm);
}
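
To make the new registration sequence concrete, here is a minimal sketch of how SoC init code might drive the interface added by this patch. The omapxxxx_* symbol names, the elided table contents, and the "clockdomain.h" include path are placeholders assumed for illustration only; they are not taken from this patch, and the real per-SoC tables live in the mach-omap2 clockdomain data files.

#include <linux/kernel.h>
#include <linux/init.h>
#include "clockdomain.h"        /* assumed to declare the clkdm_register_*() prototypes */

/* Placeholder tables: real contents are elided in this sketch */
static struct clkdm_ops omapxxxx_clkdm_operations = {
        /* .clkdm_sleep = ..., .clkdm_wakeup = ..., .clkdm_clk_enable = ..., etc. */
};

static struct clockdomain *omapxxxx_clockdomains[] = {
        /* pointers to this SoC's struct clockdomain records ... */
        NULL,                   /* NULL-terminated, as clkdm_register_clkdms() expects */
};

static struct clkdm_autodep omapxxxx_clkdm_autodeps[] = {
        /* OMAP2/3-only, deprecated wkdep/sleepdep targets ... */
        { .clkdm.ptr = NULL },  /* sentinel entry ends the registration scan */
};

void __init omapxxxx_clockdomains_init(void)
{
        /* Must come first: the other calls return -EACCES without it */
        clkdm_register_platform_funcs(&omapxxxx_clkdm_operations);

        /* May be called more than once to register additional arrays */
        clkdm_register_clkdms(omapxxxx_clockdomains);

        /* Only after the clockdomains exist, so names resolve to pointers */
        clkdm_register_autodeps(omapxxxx_clkdm_autodeps);

        /* Force software-supervised mode and clear stale wkdeps/sleepdeps */
        clkdm_complete_init();
}

The ordering mirrors the error codes documented in the new kerneldoc above: clkdm_register_clkdms() fails with -EACCES until the platform functions are in place, and clkdm_register_autodeps() and clkdm_complete_init() fail with -EACCES while clkdm_list is still empty.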