commit     c6da2cfeb05178a11c6d062a06f8078150ee492f (patch)
author     codeworkx <daniel.hillenbrand@codeworkx.de>  2012-06-02 13:09:29 +0200
committer  codeworkx <daniel.hillenbrand@codeworkx.de>  2012-06-02 13:09:29 +0200
tree       f3b4021d252c52d6463a9b3c1bb7245e399b009c  /arch/arm/kernel
parent     c6d7c4dbff353eac7919342ae6b3299a378160a6 (diff)
samsung update 1
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/.gitignore       |    1
-rw-r--r--  arch/arm/kernel/Makefile         |    1
-rw-r--r--  arch/arm/kernel/entry-armv.S     |    2
-rw-r--r--  arch/arm/kernel/etm.c            |  473
-rw-r--r--  arch/arm/kernel/hibernate.c      |  470
-rw-r--r--  arch/arm/kernel/hibernate_asm.S  |  139
-rw-r--r--  arch/arm/kernel/leds.c           |   27
-rw-r--r--  arch/arm/kernel/perf_event.c     |    6
-rw-r--r--  arch/arm/kernel/perf_event_v7.c  |  344
-rw-r--r--  arch/arm/kernel/process.c        |  120
-rw-r--r--  arch/arm/kernel/setup.c          |    4
-rw-r--r--  arch/arm/kernel/smp.c            |  132
-rw-r--r--  arch/arm/kernel/smp_scu.c        |   16
-rw-r--r--  arch/arm/kernel/stacktrace.c     |   15
-rw-r--r--  arch/arm/kernel/traps.c          |    4
15 files changed, 1572 insertions(+), 182 deletions(-)
diff --git a/arch/arm/kernel/.gitignore b/arch/arm/kernel/.gitignore
deleted file mode 100644
index c5f676c..0000000
--- a/arch/arm/kernel/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vmlinux.lds
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index a5b31af..6dccbbf 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o
obj-$(CONFIG_OF) += devtree.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd..2cd0076 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -496,7 +496,7 @@ __und_usr:
blo __und_usr_unknown
3: ldrht r0, [r4]
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
- orr r0, r0, r5, lsl #16
+ orr r0, r0, r5, lsl #16
#else
b __und_usr_unknown
#endif
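
The hunk above is whitespace-only; the `orr r0, r0, r5, lsl #16` it retabs merges the two 16-bit halfwords of a 32-bit Thumb-2 instruction (first halfword already in r5, second just loaded into r0 by `ldrht r0, [r4]`). A minimal C sketch of that merge, with a hypothetical function name:

    #include <stdint.h>

    /* Sketch only: mirrors "orr r0, r0, r5, lsl #16" -- the first
     * halfword occupies the top 16 bits of the combined word. */
    static uint32_t combine_thumb2_halfwords(uint16_t first, uint16_t second)
    {
            return ((uint32_t)first << 16) | second;
    }
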
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 1bec8b5..496b8b8 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/clk.h>
@@ -36,26 +37,36 @@ MODULE_AUTHOR("Alexander Shishkin");
struct tracectx {
unsigned int etb_bufsz;
void __iomem *etb_regs;
- void __iomem *etm_regs;
+ void __iomem **etm_regs;
+ int etm_regs_count;
unsigned long flags;
int ncmppairs;
int etm_portsz;
+ u32 etb_fc;
+ unsigned long range_start;
+ unsigned long range_end;
+ unsigned long data_range_start;
+ unsigned long data_range_end;
+ bool dump_initial_etb;
struct device *dev;
struct clk *emu_clk;
struct mutex mutex;
};
-static struct tracectx tracer;
+static struct tracectx tracer = {
+ .range_start = (unsigned long)_stext,
+ .range_end = (unsigned long)_etext,
+};
static inline bool trace_isrunning(struct tracectx *t)
{
return !!(t->flags & TRACER_RUNNING);
}
-static int etm_setup_address_range(struct tracectx *t, int n,
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
unsigned long start, unsigned long end, int exclude, int data)
{
- u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+ u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
ETMAAT_NOVALCMP;
if (n < 1 || n > t->ncmppairs)
@@ -71,95 +82,155 @@ static int etm_setup_address_range(struct tracectx *t, int n,
flags |= ETMAAT_IEXEC;
/* first comparator for the range */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
- etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+ etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+ etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
/* second comparator is right next to it */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
- etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
-
- flags = exclude ? ETMTE_INCLEXCL : 0;
- etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+ etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+ etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
+
+ if (data) {
+ flags = exclude ? ETMVDC3_EXCLONLY : 0;
+ if (exclude)
+ n += 8;
+ etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+ } else {
+ flags = exclude ? ETMTE_INCLEXCL : 0;
+ etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+ }
return 0;
}
-static int trace_start(struct tracectx *t)
+static int trace_start_etm(struct tracectx *t, int id)
{
u32 v;
unsigned long timeout = TRACER_TIMEOUT;
- etb_unlock(t);
-
- etb_writel(t, 0, ETBR_FORMATTERCTRL);
- etb_writel(t, 1, ETBR_CTRL);
-
- etb_lock(t);
-
- /* configure etm */
v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
if (t->flags & TRACER_CYCLE_ACC)
v |= ETMCTRL_CYCLEACCURATE;
- etm_unlock(t);
+ if (t->flags & TRACER_TRACE_DATA)
+ v |= ETMCTRL_DATA_DO_ADDR;
+
+ etm_unlock(t, id);
- etm_writel(t, v, ETMR_CTRL);
+ etm_writel(t, id, v, ETMR_CTRL);
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+ while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_setup_address_range(t, 1, (unsigned long)_stext,
- (unsigned long)_etext, 0, 0);
- etm_writel(t, 0, ETMR_TRACEENCTRL2);
- etm_writel(t, 0, ETMR_TRACESSCTRL);
- etm_writel(t, 0x6f, ETMR_TRACEENEVT);
+ if (t->range_start || t->range_end)
+ etm_setup_address_range(t, id, 1,
+ t->range_start, t->range_end, 0, 0);
+ else
+ etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+ etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+ etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+ etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+ etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+ etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+ if (t->data_range_start || t->data_range_end)
+ etm_setup_address_range(t, id, 2, t->data_range_start,
+ t->data_range_end, 0, 1);
+ else
+ etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+ etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
v &= ~ETMCTRL_PROGRAM;
v |= ETMCTRL_PORTSEL;
- etm_writel(t, v, ETMR_CTRL);
+ etm_writel(t, id, v, ETMR_CTRL);
timeout = TRACER_TIMEOUT;
- while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+ while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
- etm_lock(t);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_lock(t);
+ etm_lock(t, id);
+ return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+ int ret;
+ int id;
+ u32 etb_fc = t->etb_fc;
+
+ etb_unlock(t);
+
+ t->dump_initial_etb = false;
+ etb_writel(t, 0, ETBR_WRITEADDR);
+ etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+ etb_writel(t, 1, ETBR_CTRL);
+
+ etb_lock(t);
+
+ /* configure etm(s) */
+ for (id = 0; id < t->etm_regs_count; id++) {
+ ret = trace_start_etm(t, id);
+ if (ret)
+ return ret;
+ }
t->flags |= TRACER_RUNNING;
return 0;
}
-static int trace_stop(struct tracectx *t)
+static int trace_stop_etm(struct tracectx *t, int id)
{
unsigned long timeout = TRACER_TIMEOUT;
- etm_unlock(t);
+ etm_unlock(t, id);
- etm_writel(t, 0x440, ETMR_CTRL);
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+ etm_writel(t, id, 0x441, ETMR_CTRL);
+ while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_lock(t);
+ etm_lock(t, id);
+ return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+ int id;
+ int ret;
+ unsigned long timeout = TRACER_TIMEOUT;
+ u32 etb_fc = t->etb_fc;
+
+ for (id = 0; id < t->etm_regs_count; id++) {
+ ret = trace_stop_etm(t, id);
+ if (ret)
+ return ret;
+ }
etb_unlock(t);
- etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+ if (etb_fc) {
+ etb_fc |= ETBFF_STOPFL;
+ etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
+ }
+ etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
timeout = TRACER_TIMEOUT;
while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -184,24 +255,15 @@ static int trace_stop(struct tracectx *t)
static int etb_getdatalen(struct tracectx *t)
{
u32 v;
- int rp, wp;
+ int wp;
v = etb_readl(t, ETBR_STATUS);
if (v & 1)
return t->etb_bufsz;
- rp = etb_readl(t, ETBR_READADDR);
wp = etb_readl(t, ETBR_WRITEADDR);
-
- if (rp > wp) {
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
- return 0;
- }
-
- return wp - rp;
+ return wp;
}
/* sysrq+v will always stop the running trace and leave it at that */
@@ -234,21 +296,18 @@ static void etm_dump(void)
printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
printk(KERN_INFO "\n--- ETB buffer end ---\n");
- /* deassert the overflow bit */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
etb_lock(t);
}
static void sysrq_etm_dump(int key)
{
+ if (!mutex_trylock(&tracer.mutex)) {
+ printk(KERN_INFO "Tracing hardware busy\n");
+ return;
+ }
dev_dbg(tracer.dev, "Dumping ETB buffer\n");
etm_dump();
+ mutex_unlock(&tracer.mutex);
}
static struct sysrq_key_op sysrq_etm_op = {
@@ -275,6 +334,10 @@ static ssize_t etb_read(struct file *file, char __user *data,
struct tracectx *t = file->private_data;
u32 first = 0;
u32 *buf;
+ int wpos;
+ int skip;
+ long wlength;
+ loff_t pos = *ppos;
mutex_lock(&t->mutex);
@@ -286,31 +349,39 @@ static ssize_t etb_read(struct file *file, char __user *data,
etb_unlock(t);
total = etb_getdatalen(t);
+ if (total == 0 && t->dump_initial_etb)
+ total = t->etb_bufsz;
if (total == t->etb_bufsz)
first = etb_readl(t, ETBR_WRITEADDR);
+ if (pos > total * 4) {
+ skip = 0;
+ wpos = total;
+ } else {
+ skip = (int)pos % 4;
+ wpos = (int)pos / 4;
+ }
+ total -= wpos;
+ first = (first + wpos) % t->etb_bufsz;
+
etb_writel(t, first, ETBR_READADDR);
- length = min(total * 4, (int)len);
- buf = vmalloc(length);
+ wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+ length = min(total * 4 - skip, (int)len);
+ buf = vmalloc(wlength * 4);
- dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+ dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+ length, pos, wlength, first);
+ dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
- for (i = 0; i < length / 4; i++)
+ for (i = 0; i < wlength; i++)
buf[i] = etb_readl(t, ETBR_READMEM);
- /* the only way to deassert overflow bit in ETB status is this */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_WRITEADDR);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
etb_lock(t);
- length -= copy_to_user(data, buf, length);
+ length -= copy_to_user(data, (u8 *)buf + skip, length);
vfree(buf);
+ *ppos = pos + length;
out:
mutex_unlock(&t->mutex);
@@ -347,28 +418,17 @@ static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id
if (ret)
goto out;
+ mutex_lock(&t->mutex);
t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
if (!t->etb_regs) {
ret = -ENOMEM;
goto out_release;
}
+ t->dev = &dev->dev;
+ t->dump_initial_etb = true;
amba_set_drvdata(dev, t);
- etb_miscdev.parent = &dev->dev;
-
- ret = misc_register(&etb_miscdev);
- if (ret)
- goto out_unmap;
-
- t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
- if (IS_ERR(t->emu_clk)) {
- dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
- return -EFAULT;
- }
-
- clk_enable(t->emu_clk);
-
etb_unlock(t);
t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -377,6 +437,20 @@ static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id
etb_writel(t, 0, ETBR_CTRL);
etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
etb_lock(t);
+ mutex_unlock(&t->mutex);
+
+ etb_miscdev.parent = &dev->dev;
+
+ ret = misc_register(&etb_miscdev);
+ if (ret)
+ goto out_unmap;
+
+ /* Get optional clock. Currently used to select clock source on omap3 */
+ t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+ if (IS_ERR(t->emu_clk))
+ dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+ else
+ clk_enable(t->emu_clk);
dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
@@ -384,10 +458,13 @@ out:
return ret;
out_unmap:
+ mutex_lock(&t->mutex);
amba_set_drvdata(dev, NULL);
iounmap(t->etb_regs);
+ t->etb_regs = NULL;
out_release:
+ mutex_unlock(&t->mutex);
amba_release_regions(dev);
return ret;
@@ -402,8 +479,10 @@ static int etb_remove(struct amba_device *dev)
iounmap(t->etb_regs);
t->etb_regs = NULL;
- clk_disable(t->emu_clk);
- clk_put(t->emu_clk);
+ if (!IS_ERR(t->emu_clk)) {
+ clk_disable(t->emu_clk);
+ clk_put(t->emu_clk);
+ }
amba_release_regions(dev);
@@ -447,7 +526,10 @@ static ssize_t trace_running_store(struct kobject *kobj,
return -EINVAL;
mutex_lock(&tracer.mutex);
- ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+ if (!tracer.etb_regs)
+ ret = -ENODEV;
+ else
+ ret = value ? trace_start(&tracer) : trace_stop(&tracer);
mutex_unlock(&tracer.mutex);
return ret ? : n;
@@ -462,36 +544,50 @@ static ssize_t trace_info_show(struct kobject *kobj,
{
u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
int datalen;
+ int id;
+ int ret;
- etb_unlock(&tracer);
- datalen = etb_getdatalen(&tracer);
- etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
- etb_ra = etb_readl(&tracer, ETBR_READADDR);
- etb_st = etb_readl(&tracer, ETBR_STATUS);
- etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
- etb_lock(&tracer);
-
- etm_unlock(&tracer);
- etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
- etm_st = etm_readl(&tracer, ETMR_STATUS);
- etm_lock(&tracer);
+ mutex_lock(&tracer.mutex);
+ if (tracer.etb_regs) {
+ etb_unlock(&tracer);
+ datalen = etb_getdatalen(&tracer);
+ etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+ etb_ra = etb_readl(&tracer, ETBR_READADDR);
+ etb_st = etb_readl(&tracer, ETBR_STATUS);
+ etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+ etb_lock(&tracer);
+ } else {
+ etb_wa = etb_ra = etb_st = etb_fc = ~0;
+ datalen = -1;
+ }
- return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+ ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
"ETBR_WRITEADDR:\t%08x\n"
"ETBR_READADDR:\t%08x\n"
"ETBR_STATUS:\t%08x\n"
- "ETBR_FORMATTERCTRL:\t%08x\n"
- "ETMR_CTRL:\t%08x\n"
- "ETMR_STATUS:\t%08x\n",
+ "ETBR_FORMATTERCTRL:\t%08x\n",
datalen,
tracer.ncmppairs,
etb_wa,
etb_ra,
etb_st,
- etb_fc,
+ etb_fc
+ );
+
+ for (id = 0; id < tracer.etm_regs_count; id++) {
+ etm_unlock(&tracer, id);
+ etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+ etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+ etm_lock(&tracer, id);
+ ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+ "ETMR_STATUS:\t%08x\n",
etm_ctrl,
etm_st
);
+ }
+ mutex_unlock(&tracer.mutex);
+
+ return ret;
}
static struct kobj_attribute trace_info_attr =
@@ -530,42 +626,121 @@ static ssize_t trace_mode_store(struct kobject *kobj,
static struct kobj_attribute trace_mode_attr =
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
+static ssize_t trace_range_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%08lx %08lx\n",
+ tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long range_start, range_end;
+
+ if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ tracer.range_start = range_start;
+ tracer.range_end = range_end;
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+
+static struct kobj_attribute trace_range_attr =
+ __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long range_start;
+ u64 range_end;
+ mutex_lock(&tracer.mutex);
+ range_start = tracer.data_range_start;
+ range_end = tracer.data_range_end;
+ if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+ range_end = 0x100000000ULL;
+ mutex_unlock(&tracer.mutex);
+ return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long range_start;
+ u64 range_end;
+
+ if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ tracer.data_range_start = range_start;
+ tracer.data_range_end = (unsigned long)range_end;
+ if (range_end)
+ tracer.flags |= TRACER_TRACE_DATA;
+ else
+ tracer.flags &= ~TRACER_TRACE_DATA;
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+
+static struct kobj_attribute trace_data_range_attr =
+ __ATTR(trace_data_range, 0644,
+ trace_data_range_show, trace_data_range_store);
+
static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
+ void __iomem **new_regs;
+ int new_count;
- if (t->etm_regs) {
- dev_dbg(&dev->dev, "ETM already initialized\n");
- ret = -EBUSY;
+ mutex_lock(&t->mutex);
+ new_count = t->etm_regs_count + 1;
+ new_regs = krealloc(t->etm_regs,
+ sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
+
+ if (!new_regs) {
+ dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+ ret = -ENOMEM;
goto out;
}
+ t->etm_regs = new_regs;
ret = amba_request_regions(dev, NULL);
if (ret)
goto out;
- t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
- if (!t->etm_regs) {
+ t->etm_regs[t->etm_regs_count] =
+ ioremap_nocache(dev->res.start, resource_size(&dev->res));
+ if (!t->etm_regs[t->etm_regs_count]) {
ret = -ENOMEM;
goto out_release;
}
- amba_set_drvdata(dev, t);
+ amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
- mutex_init(&t->mutex);
- t->dev = &dev->dev;
- t->flags = TRACER_CYCLE_ACC;
+ t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA;
t->etm_portsz = 1;
- etm_unlock(t);
- (void)etm_readl(t, ETMMR_PDSR);
+ etm_unlock(t, t->etm_regs_count);
+ (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
/* dummy first read */
- (void)etm_readl(&tracer, ETMMR_OSSRR);
+ (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
- t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
- etm_writel(t, 0x440, ETMR_CTRL);
- etm_lock(t);
+ t->ncmppairs = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE) & 0xf;
+ etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
+ etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+ etm_lock(t, t->etm_regs_count);
ret = sysfs_create_file(&dev->dev.kobj,
&trace_running_attr.attr);
@@ -581,36 +756,68 @@ static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id
if (ret)
dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
- dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+ ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+ ret = sysfs_create_file(&dev->dev.kobj, &trace_data_range_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev,
+ "Failed to create trace_data_range in sysfs\n");
+
+ dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+ /* Enable formatter if there are multiple trace sources */
+ if (new_count > 1)
+ t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+ t->etm_regs_count = new_count;
out:
+ mutex_unlock(&t->mutex);
return ret;
out_unmap:
amba_set_drvdata(dev, NULL);
- iounmap(t->etm_regs);
+ iounmap(t->etm_regs[t->etm_regs_count]);
out_release:
amba_release_regions(dev);
+ mutex_unlock(&t->mutex);
return ret;
}
static int etm_remove(struct amba_device *dev)
{
- struct tracectx *t = amba_get_drvdata(dev);
+ int i;
+ struct tracectx *t = &tracer;
+ void __iomem *etm_regs = amba_get_drvdata(dev);
+
+ sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
amba_set_drvdata(dev, NULL);
- iounmap(t->etm_regs);
- t->etm_regs = NULL;
+ mutex_lock(&t->mutex);
+ for (i = 0; i < t->etm_regs_count; i++)
+ if (t->etm_regs[i] == etm_regs)
+ break;
+ for (; i < t->etm_regs_count - 1; i++)
+ t->etm_regs[i] = t->etm_regs[i + 1];
+ t->etm_regs_count--;
+ if (!t->etm_regs_count) {
+ kfree(t->etm_regs);
+ t->etm_regs = NULL;
+ }
+ mutex_unlock(&t->mutex);
+ iounmap(etm_regs);
amba_release_regions(dev);
- sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-
return 0;
}
@@ -619,6 +826,10 @@ static struct amba_id etm_ids[] = {
.id = 0x0003b921,
.mask = 0x0007ffff,
},
+ {
+ .id = 0x0003b950,
+ .mask = 0x0007ffff,
+ },
{ 0, 0 },
};
@@ -636,6 +847,8 @@ static int __init etm_init(void)
{
int retval;
+ mutex_init(&tracer.mutex);
+
retval = amba_driver_register(&etb_driver);
if (retval) {
printk(KERN_ERR "Failed to register etb\n");
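
The reworked etb_read() above serves byte-granular userspace reads from a word-granular ETB. A standalone sketch of that windowing arithmetic, assuming nothing beyond the skip/wpos/wlength computation visible in the hunk (names are stand-ins):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define MIN(a, b)           ((a) < (b) ? (a) : (b))

    /* 'pos' and 'len' come from userspace; 'total' is the number of
     * 32-bit words currently held in the ETB. */
    static void etb_window(long pos, int len, int total)
    {
            int skip, wpos, wlength, length;

            if (pos > (long)total * 4) {            /* read past the end */
                    skip = 0;
                    wpos = total;
            } else {
                    skip = (int)pos % 4;            /* bytes to drop from word 0 */
                    wpos = (int)pos / 4;            /* first word to read */
            }
            total -= wpos;                          /* words still available */

            wlength = MIN(total, DIV_ROUND_UP(skip + len, 4));
            length  = MIN(total * 4 - skip, len);   /* bytes handed to the user */

            printf("read %d words, copy %d bytes (skipping %d)\n",
                   wlength, length, skip);
    }

    int main(void)
    {
            etb_window(6, 10, 1024);        /* 10 bytes from byte offset 6 */
            return 0;
    }

For a 10-byte read at offset 6 this reads 3 words and copies 10 bytes after skipping 2, matching what the driver does before copy_to_user().
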
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
new file mode 100644
index 0000000..354cefc
--- /dev/null
+++ b/arch/arm/kernel/hibernate.c
@@ -0,0 +1,470 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/sections.h>
+
+/*
+ * Image of the saved processor state
+ *
+ * coprocessor 15 registers(RW)
+ */
+struct saved_context_cortex_a8 {
+ /* CR0 */
+ u32 cssr; /* Cache Size Selection */
+ /* CR1 */
+ u32 cr; /* Control */
+ u32 cacr; /* Coprocessor Access Control */
+ /* CR2 */
+ u32 ttb_0r; /* Translation Table Base 0 */
+ u32 ttb_1r; /* Translation Table Base 1 */
+ u32 ttbcr; /* Translation Table Base Control */
+ /* CR3 */
+ u32 dacr; /* Domain Access Control */
+ /* CR5 */
+ u32 d_fsr; /* Data Fault Status */
+ u32 i_fsr; /* Instruction Fault Status */
+ u32 d_afsr; /* Data Auxiliary Fault Status */
+ u32 i_afsr; /* Instruction Auxiliary Fault Status */
+ /* CR6 */
+ u32 d_far; /* Data Fault Address */
+ u32 i_far; /* Instruction Fault Address */
+ /* CR7 */
+ u32 par; /* Physical Address */
+ /* CR9 */ /* FIXME: Are they necessary? */
+ u32 pmcontrolr; /* Performance Monitor Control */
+ u32 cesr; /* Count Enable Set */
+ u32 cecr; /* Count Enable Clear */
+ u32 ofsr; /* Overflow Flag Status */
+ u32 sir; /* Software Increment */
+ u32 pcsr; /* Performance Counter Selection */
+ u32 ccr; /* Cycle Count */
+ u32 esr; /* Event Selection */
+ u32 pmcountr; /* Performance Monitor Count */
+ u32 uer; /* User Enable */
+ u32 iesr; /* Interrupt Enable Set */
+ u32 iecr; /* Interrupt Enable Clear */
+ u32 l2clr; /* L2 Cache Lockdown */
+ /* CR10 */
+ u32 d_tlblr; /* Data TLB Lockdown Register */
+ u32 i_tlblr; /* Instruction TLB Lockdown Register */
+ u32 prrr; /* Primary Region Remap Register */
+ u32 nrrr; /* Normal Memory Remap Register */
+ /* CR11 */
+ u32 pleuar; /* PLE User Accessibility */
+ u32 plecnr; /* PLE Channel Number */
+ u32 plecr; /* PLE Control */
+ u32 pleisar; /* PLE Internal Start Address */
+ u32 pleiear; /* PLE Internal End Address */
+ u32 plecidr; /* PLE Context ID */
+ /* CR12 */
+ u32 snsvbar; /* Secure or Nonsecure Vector Base Address */
+ /* CR13 */
+ u32 fcse; /* FCSE PID */
+ u32 cid; /* Context ID */
+ u32 urwtpid; /* User read/write Thread and Process ID */
+ u32 urotpid; /* User read-only Thread and Process ID */
+ u32 potpid; /* Privileged only Thread and Process ID */
+} __packed;
+
+struct saved_context_cortex_a9 {
+ /* CR0 */
+ u32 cssr; /* Cache Size Selection */
+ /* CR1 */
+ u32 cr;
+ u32 actlr;
+ u32 cacr;
+ u32 sder;
+ u32 vcr;
+ /* CR2 */
+ u32 ttb_0r; /* Translation Table Base 0 */
+ u32 ttb_1r; /* Translation Table Base 1 */
+ u32 ttbcr; /* Translation Table Base Control */
+ /* CR3 */
+ u32 dacr; /* Domain Access Control */
+ /* CR5 */
+ u32 d_fsr; /* Data Fault Status */
+ u32 i_fsr; /* Instruction Fault Status */
+ u32 d_afsr; /* Data Auxiliary Fault Status */
+ u32 i_afsr; /* Instruction Auxiliary Fault Status */
+ /* CR6 */
+ u32 d_far; /* Data Fault Address */
+ u32 i_far; /* Instruction Fault Address */
+ /* CR7 */
+ u32 par; /* Physical Address */
+ /* CR9 */ /* FIXME: Are they necessary? */
+ u32 pmcontrolr; /* Performance Monitor Control */
+ u32 cesr; /* Count Enable Set */
+ u32 cecr; /* Count Enable Clear */
+ u32 ofsr; /* Overflow Flag Status */
+ u32 pcsr; /* Performance Counter Selection */
+ u32 ccr; /* Cycle Count */
+ u32 esr; /* Event Selection */
+ u32 pmcountr; /* Performance Monitor Count */
+ u32 uer; /* User Enable */
+ u32 iesr; /* Interrupt Enable Set */
+ u32 iecr; /* Interrupt Enable Clear */
+ /* CR10 */
+ u32 d_tlblr; /* Data TLB Lockdown Register */
+ u32 prrr; /* Primary Region Remap Register */
+ u32 nrrr; /* Normal Memory Remap Register */
+ /* CR11 */
+ /* CR12 */
+ u32 vbar;
+ u32 mvbar;
+ u32 vir;
+ /* CR13 */
+ u32 fcse; /* FCSE PID */
+ u32 cid; /* Context ID */
+ u32 urwtpid; /* User read/write Thread and Process ID */
+ u32 urotpid; /* User read-only Thread and Process ID */
+ u32 potpid; /* Privileged only Thread and Process ID */
+ /* CR15 */
+ u32 mtlbar;
+} __packed;
+
+union saved_context {
+ struct saved_context_cortex_a8 cortex_a8;
+ struct saved_context_cortex_a9 cortex_a9;
+};
+
+/* Used in hibernate_asm.S */
+#define USER_CONTEXT_SIZE (15 * sizeof(u32))
+unsigned long saved_context_r0[USER_CONTEXT_SIZE];
+unsigned long saved_cpsr;
+unsigned long saved_context_r13_svc;
+unsigned long saved_context_r14_svc;
+unsigned long saved_spsr_svc;
+
+static union saved_context saved_context;
+
+/*
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin)
+ >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end))
+ >> PAGE_SHIFT;
+
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
+
+#define PART_NUM_CORTEX_A8 (0xC08)
+#define PART_NUM_CORTEX_A9 (0xC09)
+
+static inline u32 arm_primary_part_number(void)
+{
+ u32 id;
+
+ asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r"(id));
+
+ /* Is this ARM? */
+ if ((id & 0xff000000) != 0x41000000)
+ return UINT_MAX;
+
+ id >>= 4;
+ id &= 0xfff;
+ return id;
+}
+
+static inline void __save_processor_state_a8(
+ struct saved_context_cortex_a8 *ctxt)
+{
+ /* CR0 */
+ asm volatile ("mrc p15, 2, %0, c0, c0, 0" : "=r"(ctxt->cssr));
+ /* CR1 */
+ asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(ctxt->cr));
+ asm volatile ("mrc p15, 0, %0, c1, c0, 2" : "=r"(ctxt->cacr));
+ /* CR2 */
+ asm volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r"(ctxt->ttb_0r));
+ asm volatile ("mrc p15, 0, %0, c2, c0, 1" : "=r"(ctxt->ttb_1r));
+ asm volatile ("mrc p15, 0, %0, c2, c0, 2" : "=r"(ctxt->ttbcr));
+ /* CR3 */
+ asm volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r"(ctxt->dacr));
+ /* CR5 */
+ asm volatile ("mrc p15, 0, %0, c5, c0, 0" : "=r"(ctxt->d_fsr));
+ asm volatile ("mrc p15, 0, %0, c5, c0, 1" : "=r"(ctxt->i_fsr));
+ asm volatile ("mrc p15, 0, %0, c5, c1, 0" : "=r"(ctxt->d_afsr));
+ asm volatile ("mrc p15, 0, %0, c5, c1, 1" : "=r"(ctxt->i_afsr));
+ /* CR6 */
+ asm volatile ("mrc p15, 0, %0, c6, c0, 0" : "=r"(ctxt->d_far));
+ asm volatile ("mrc p15, 0, %0, c6, c0, 2" : "=r"(ctxt->i_far));
+ /* CR7 */
+ asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r"(ctxt->par));
+ /* CR9 */
+ asm volatile ("mrc p15, 0, %0, c9, c12, 0" : "=r"(ctxt->pmcontrolr));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 1" : "=r"(ctxt->cesr));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 2" : "=r"(ctxt->cecr));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 3" : "=r"(ctxt->ofsr));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 4" : "=r"(ctxt->sir));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 5" : "=r"(ctxt->pcsr));
+ asm volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r"(ctxt->ccr));
+ asm volatile ("mrc p15, 0, %0, c9, c13, 1" : "=r"(ctxt->esr));
+ asm volatile ("mrc p15, 0, %0, c9, c13, 2" : "=r"(ctxt->pmcountr));
+ asm volatile ("mrc p15, 0, %0, c9, c14, 0" : "=r"(ctxt->uer));
+ asm volatile ("mrc p15, 0, %0, c9, c14, 1" : "=r"(ctxt->iesr));
+ asm volatile ("mrc p15, 0, %0, c9, c14, 2" : "=r"(ctxt->iecr));
+ asm volatile ("mrc p15, 1, %0, c9, c0, 0" : "=r"(ctxt->l2clr));
+ /* CR10 */
+ asm volatile ("mrc p15, 0, %0, c10, c0, 0" : "=r"(ctxt->d_tlblr));
+ asm volatile ("mrc p15, 0, %0, c10, c0, 1" : "=r"(ctxt->i_tlblr));
+ asm volatile ("mrc p15, 0, %0, c10, c2, 0" : "=r"(ctxt->prrr));
+ asm volatile ("mrc p15, 0, %0, c10, c2, 1" : "=r"(ctxt->nrrr));
+ /* CR11 */
+ asm volatile ("mrc p15, 0, %0, c11, c1, 0" : "=r"(ctxt->pleuar));
+ asm volatile ("mrc p15, 0, %0, c11, c2, 0" : "=r"(ctxt->plecnr));
+ asm volatile ("mrc p15, 0, %0, c11, c4, 0" : "=r"(ctxt->plecr));
+ asm volatile ("mrc p15, 0, %0, c11, c5, 0" : "=r"(ctxt->pleisar));
+ asm volatile ("mrc p15, 0, %0, c11, c7, 0" : "=r"(ctxt->pleiear));
+ asm volatile ("mrc p15, 0, %0, c11, c15, 0" : "=r"(ctxt->plecidr));
+ /* CR12 */
+ asm volatile ("mrc p15, 0, %0, c12, c0, 0" : "=r"(ctxt->snsvbar));
+ /* CR13 */
+ asm volatile ("mrc p15, 0, %0, c13, c0, 0" : "=r"(ctxt->fcse));
+ asm volatile ("mrc p15, 0, %0, c13, c0, 1" : "=r"(ctxt->cid));
+ asm volatile ("mrc p15, 0, %0, c13, c0, 2" : "=r"(ctxt->urwtpid));
+ asm volatile ("mrc p15, 0, %0, c13, c0, 3" : "=r"(ctxt->urotpid));
+ asm volatile ("mrc p15, 0, %0, c13, c0, 4" : "=r"(ctxt->potpid));
+}
+
+static inline void __save_processor_state_a9(
+ struct saved_context_cortex_a9 *ctxt)
+{
+ /* CR0 */
+ asm volatile ("mrc p15, 2, %0, c0, c0, 0" : "=r"(ctxt->cssr));
+ /* CR1 */
+ asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(ctxt->cr));
+ asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r"(ctxt->actlr));
+ asm volatile ("mrc p15, 0, %0, c1, c0, 2" : "=r"(ctxt->cacr));
+#ifndef CONFIG_ARM_TRUSTZONE
+ asm volatile ("mrc p15, 0, %0, c1, c1, 1" : "=r"(ctxt->sder));
+ asm volatile ("mrc p15, 0, %0, c1, c1, 3" : "=r"(ctxt->vcr));
+#endif
+ /* CR2 */
+ asm volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r"(ctxt->ttb_0r));
+ asm volatile ("mrc p15, 0, %0, c2, c0, 1" : "=r"(ctxt->ttb_1r));
+ asm volatile ("mrc p15, 0, %0, c2, c0, 2" : "=r"(ctxt->ttbcr));
+ /* CR3 */
+ asm volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r"(ctxt->dacr));
+ /* CR5 */
+ asm volatile ("mrc p15, 0, %0, c5, c0, 0" : "=r"(ctxt->d_fsr));
+ asm volatile ("mrc p15, 0, %0, c5, c0, 1" : "=r"(ctxt->i_fsr));
+ asm volatile ("mrc p15, 0, %0, c5, c1, 0" : "=r"(ctxt->d_afsr));
+ asm volatile ("mrc p15, 0, %0, c5, c1, 1" : "=r"(ctxt->i_afsr));
+ /* CR6 */
+ asm volatile ("mrc p15, 0, %0, c6, c0, 0" : "=r"(ctxt->d_far));
+ asm volatile ("mrc p15, 0, %0, c6, c0, 2" : "=r"(ctxt->i_far));
+ /* CR7 */
+ asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r"(ctxt->par));
+ /* CR9 */
+ asm volatile ("mrc p15, 0, %0, c9, c12, 0" : "=r"(ctxt->pmcontrolr));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 1" : "=r"(ctxt->cesr));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 2" : "=r"(ctxt->cecr));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 3" : "=r"(ctxt->ofsr));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 5" : "=r"(ctxt->pcsr));
+ asm volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r"(ctxt->ccr));
+ asm volatile ("mrc p15, 0, %0, c9, c13, 1" : "=r"(ctxt->esr));
+ asm volatile ("mrc p15, 0, %0, c9, c13, 2" : "=r"(ctxt->pmcountr));
+ asm volatile ("mrc p15, 0, %0, c9, c14, 0" : "=r"(ctxt->uer));
+ asm volatile ("mrc p15, 0, %0, c9, c14, 1" : "=r"(ctxt->iesr));
+ asm volatile ("mrc p15, 0, %0, c9, c14, 2" : "=r"(ctxt->iecr));
+ /* CR10 */
+ asm volatile ("mrc p15, 0, %0, c10, c0, 0" : "=r"(ctxt->d_tlblr));
+ asm volatile ("mrc p15, 0, %0, c10, c2, 0" : "=r"(ctxt->prrr));
+ asm volatile ("mrc p15, 0, %0, c10, c2, 1" : "=r"(ctxt->nrrr));
+ /* CR11 */
+ /* CR12 */
+ asm volatile ("mrc p15, 0, %0, c12, c0, 0" : "=r"(ctxt->vbar));
+#ifndef CONFIG_ARM_TRUSTZONE
+ asm volatile ("mrc p15, 0, %0, c12, c0, 1" : "=r"(ctxt->mvbar));
+ asm volatile ("mrc p15, 0, %0, c12, c1, 1" : "=r"(ctxt->vir));
+#endif
+ /* CR13 */
+ asm volatile ("mrc p15, 0, %0, c13, c0, 0" : "=r"(ctxt->fcse));
+ asm volatile ("mrc p15, 0, %0, c13, c0, 1" : "=r"(ctxt->cid));
+ asm volatile ("mrc p15, 0, %0, c13, c0, 2" : "=r"(ctxt->urwtpid));
+ asm volatile ("mrc p15, 0, %0, c13, c0, 3" : "=r"(ctxt->urotpid));
+ asm volatile ("mrc p15, 0, %0, c13, c0, 4" : "=r"(ctxt->potpid));
+ /* CR15*/
+#ifndef CONFIG_ARM_TRUSTZONE
+ asm volatile ("mrc p15, 5, %0, c15, c7, 2" : "=r"(ctxt->mtlbar));
+#endif
+}
+
+static inline void __save_processor_state(union saved_context *ctxt)
+{
+ switch (arm_primary_part_number()) {
+ case PART_NUM_CORTEX_A8:
+ __save_processor_state_a8(&ctxt->cortex_a8);
+ break;
+ case PART_NUM_CORTEX_A9:
+ __save_processor_state_a9(&ctxt->cortex_a9);
+ break;
+ default:
+ WARN(true, "Hibernation is not supported for this processor (%d).",
+ arm_primary_part_number());
+ }
+}
+
+static inline void __restore_processor_state_a8(
+ struct saved_context_cortex_a8 *ctxt)
+{
+ /* CR0 */
+ asm volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r"(ctxt->cssr));
+ /* CR1 */
+ asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r"(ctxt->cr));
+ asm volatile ("mcr p15, 0, %0, c1, c0, 2" : : "r"(ctxt->cacr));
+ /* CR2 */
+ asm volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r"(ctxt->ttb_0r));
+ asm volatile ("mcr p15, 0, %0, c2, c0, 1" : : "r"(ctxt->ttb_1r));
+ asm volatile ("mcr p15, 0, %0, c2, c0, 2" : : "r"(ctxt->ttbcr));
+ /* CR3 */
+ asm volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r"(ctxt->dacr));
+ /* CR5 */
+ asm volatile ("mcr p15, 0, %0, c5, c0, 0" : : "r"(ctxt->d_fsr));
+ asm volatile ("mcr p15, 0, %0, c5, c0, 1" : : "r"(ctxt->i_fsr));
+ asm volatile ("mcr p15, 0, %0, c5, c1, 0" : : "r"(ctxt->d_afsr));
+ asm volatile ("mcr p15, 0, %0, c5, c1, 1" : : "r"(ctxt->i_afsr));
+ /* CR6 */
+ asm volatile ("mcr p15, 0, %0, c6, c0, 0" : : "r"(ctxt->d_far));
+ asm volatile ("mcr p15, 0, %0, c6, c0, 2" : : "r"(ctxt->i_far));
+ /* CR7 */
+ asm volatile ("mcr p15, 0, %0, c7, c4, 0" : : "r"(ctxt->par));
+ /* CR9 */
+ asm volatile ("mcr p15, 0, %0, c9, c12, 0" : : "r"(ctxt->pmcontrolr));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 1" : : "r"(ctxt->cesr));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 2" : : "r"(ctxt->cecr));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 3" : : "r"(ctxt->ofsr));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 4" : : "r"(ctxt->sir));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 5" : : "r"(ctxt->pcsr));
+ asm volatile ("mcr p15, 0, %0, c9, c13, 0" : : "r"(ctxt->ccr));
+ asm volatile ("mcr p15, 0, %0, c9, c13, 1" : : "r"(ctxt->esr));
+ asm volatile ("mcr p15, 0, %0, c9, c13, 2" : : "r"(ctxt->pmcountr));
+ asm volatile ("mcr p15, 0, %0, c9, c14, 0" : : "r"(ctxt->uer));
+ asm volatile ("mcr p15, 0, %0, c9, c14, 1" : : "r"(ctxt->iesr));
+ asm volatile ("mcr p15, 0, %0, c9, c14, 2" : : "r"(ctxt->iecr));
+ asm volatile ("mcr p15, 1, %0, c9, c0, 0" : : "r"(ctxt->l2clr));
+ /* CR10 */
+ asm volatile ("mcr p15, 0, %0, c10, c0, 0" : : "r"(ctxt->d_tlblr));
+ asm volatile ("mcr p15, 0, %0, c10, c0, 1" : : "r"(ctxt->i_tlblr));
+ asm volatile ("mcr p15, 0, %0, c10, c2, 0" : : "r"(ctxt->prrr));
+ asm volatile ("mcr p15, 0, %0, c10, c2, 1" : : "r"(ctxt->nrrr));
+ /* CR11 */
+ asm volatile ("mcr p15, 0, %0, c11, c1, 0" : : "r"(ctxt->pleuar));
+ asm volatile ("mcr p15, 0, %0, c11, c2, 0" : : "r"(ctxt->plecnr));
+ asm volatile ("mcr p15, 0, %0, c11, c4, 0" : : "r"(ctxt->plecr));
+ asm volatile ("mcr p15, 0, %0, c11, c5, 0" : : "r"(ctxt->pleisar));
+ asm volatile ("mcr p15, 0, %0, c11, c7, 0" : : "r"(ctxt->pleiear));
+ asm volatile ("mcr p15, 0, %0, c11, c15, 0" : : "r"(ctxt->plecidr));
+ /* CR12 */
+ asm volatile ("mcr p15, 0, %0, c12, c0, 0" : : "r"(ctxt->snsvbar));
+ /* CR13 */
+ asm volatile ("mcr p15, 0, %0, c13, c0, 0" : : "r"(ctxt->fcse));
+ asm volatile ("mcr p15, 0, %0, c13, c0, 1" : : "r"(ctxt->cid));
+ asm volatile ("mcr p15, 0, %0, c13, c0, 2" : : "r"(ctxt->urwtpid));
+ asm volatile ("mcr p15, 0, %0, c13, c0, 3" : : "r"(ctxt->urotpid));
+ asm volatile ("mcr p15, 0, %0, c13, c0, 4" : : "r"(ctxt->potpid));
+}
+
+static inline void __restore_processor_state_a9(
+ struct saved_context_cortex_a9 *ctxt)
+{
+ /* CR0 */
+ asm volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r"(ctxt->cssr));
+ /* CR1 */
+ asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r"(ctxt->cr));
+ asm volatile ("mcr p15, 0, %0, c1, c0, 1" : : "r"(ctxt->actlr));
+ asm volatile ("mcr p15, 0, %0, c1, c0, 2" : : "r"(ctxt->cacr));
+#ifndef CONFIG_ARM_TRUSTZONE
+ asm volatile ("mcr p15, 0, %0, c1, c1, 1" : : "r"(ctxt->sder));
+ asm volatile ("mcr p15, 0, %0, c1, c1, 3" : : "r"(ctxt->vcr));
+#endif
+ /* CR2 */
+ asm volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r"(ctxt->ttb_0r));
+ asm volatile ("mcr p15, 0, %0, c2, c0, 1" : : "r"(ctxt->ttb_1r));
+ asm volatile ("mcr p15, 0, %0, c2, c0, 2" : : "r"(ctxt->ttbcr));
+ /* CR3 */
+ asm volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r"(ctxt->dacr));
+ /* CR5 */
+ asm volatile ("mcr p15, 0, %0, c5, c0, 0" : : "r"(ctxt->d_fsr));
+ asm volatile ("mcr p15, 0, %0, c5, c0, 1" : : "r"(ctxt->i_fsr));
+ asm volatile ("mcr p15, 0, %0, c5, c1, 0" : : "r"(ctxt->d_afsr));
+ asm volatile ("mcr p15, 0, %0, c5, c1, 1" : : "r"(ctxt->i_afsr));
+ /* CR6 */
+ asm volatile ("mcr p15, 0, %0, c6, c0, 0" : : "r"(ctxt->d_far));
+ asm volatile ("mcr p15, 0, %0, c6, c0, 2" : : "r"(ctxt->i_far));
+ /* CR7 */
+ asm volatile ("mcr p15, 0, %0, c7, c4, 0" : : "r"(ctxt->par));
+ /* CR9 */
+ asm volatile ("mcr p15, 0, %0, c9, c12, 0" : : "r"(ctxt->pmcontrolr));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 1" : : "r"(ctxt->cesr));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 2" : : "r"(ctxt->cecr));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 3" : : "r"(ctxt->ofsr));
+ asm volatile ("mcr p15, 0, %0, c9, c12, 5" : : "r"(ctxt->pcsr));
+ asm volatile ("mcr p15, 0, %0, c9, c13, 0" : : "r"(ctxt->ccr));
+ asm volatile ("mcr p15, 0, %0, c9, c13, 1" : : "r"(ctxt->esr));
+ asm volatile ("mcr p15, 0, %0, c9, c13, 2" : : "r"(ctxt->pmcountr));
+ asm volatile ("mcr p15, 0, %0, c9, c14, 0" : : "r"(ctxt->uer));
+ asm volatile ("mcr p15, 0, %0, c9, c14, 1" : : "r"(ctxt->iesr));
+ asm volatile ("mcr p15, 0, %0, c9, c14, 2" : : "r"(ctxt->iecr));
+ /* CR10 */
+ asm volatile ("mcr p15, 0, %0, c10, c0, 0" : : "r"(ctxt->d_tlblr));
+ asm volatile ("mcr p15, 0, %0, c10, c2, 0" : : "r"(ctxt->prrr));
+ asm volatile ("mcr p15, 0, %0, c10, c2, 1" : : "r"(ctxt->nrrr));
+ /* CR11 */
+ /* CR12 */
+ asm volatile ("mcr p15, 0, %0, c12, c0, 0" : : "r"(ctxt->vbar));
+#ifndef CONFIG_ARM_TRUSTZONE
+ asm volatile ("mcr p15, 0, %0, c12, c0, 1" : : "r"(ctxt->mvbar));
+ asm volatile ("mcr p15, 0, %0, c12, c1, 1" : : "r"(ctxt->vir));
+#endif
+ /* CR13 */
+ asm volatile ("mcr p15, 0, %0, c13, c0, 0" : : "r"(ctxt->fcse));
+ asm volatile ("mcr p15, 0, %0, c13, c0, 1" : : "r"(ctxt->cid));
+ asm volatile ("mcr p15, 0, %0, c13, c0, 2" : : "r"(ctxt->urwtpid));
+ asm volatile ("mcr p15, 0, %0, c13, c0, 3" : : "r"(ctxt->urotpid));
+ asm volatile ("mcr p15, 0, %0, c13, c0, 4" : : "r"(ctxt->potpid));
+ /* CR15 */
+#ifndef CONFIG_ARM_TRUSTZONE
+ asm volatile ("mcr p15, 5, %0, c15, c7, 2" : : "r"(ctxt->mtlbar));
+#endif
+}
+
+static inline void __restore_processor_state(union saved_context *ctxt)
+{
+ switch (arm_primary_part_number()) {
+ case PART_NUM_CORTEX_A8:
+ __restore_processor_state_a8(&ctxt->cortex_a8);
+ break;
+ case PART_NUM_CORTEX_A9:
+ __restore_processor_state_a9(&ctxt->cortex_a9);
+ break;
+ default:
+ WARN(true, "Hibernation is not supported for this processor (%d).",
+ arm_primary_part_number());
+ }
+}
+
+void save_processor_state(void)
+{
+ preempt_disable();
+ __save_processor_state(&saved_context);
+}
+
+void restore_processor_state(void)
+{
+ __restore_processor_state(&saved_context);
+ preempt_enable();
+}
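
arm_primary_part_number() above reads the Main ID Register (MIDR) and dispatches on the part-number field. A self-contained sketch of the same field decoding, using a hard-coded example value in place of the mrc read (the value is illustrative, not read from hardware):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t midr = 0x413fc090;           /* example Cortex-A9 MIDR */
            uint32_t implementer = midr >> 24;    /* 0x41 == 'A', ARM Ltd. */
            uint32_t part = (midr >> 4) & 0xfff;  /* 0xc08 = A8, 0xc09 = A9 */

            if (implementer == 0x41)
                    printf("ARM part number: 0x%03x\n", (unsigned)part);
            return 0;
    }
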
diff --git a/arch/arm/kernel/hibernate_asm.S b/arch/arm/kernel/hibernate_asm.S
new file mode 100644
index 0000000..9538789
--- /dev/null
+++ b/arch/arm/kernel/hibernate_asm.S
@@ -0,0 +1,139 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/linkage.h>
+ .text
+ENTRY(swsusp_arch_suspend)
+ /*
+ * Save current program status register
+ */
+ ldr r3, .Lsaved_cpsr
+ mrs r0, cpsr
+ str r0, [r3]
+
+ /*
+ * Change to system(user) mode
+ */
+ mov r1, r0
+ orr r1, r1, #0x1f
+ msr cpsr_c, r1
+
+ /*
+ * Save User context
+ */
+ ldr r3, .Lsaved_context_r0
+ stmia r3, {r0-r14}
+
+ /*
+ * Go back to original SVC mode
+ */
+ msr cpsr_c, r0
+
+ /*
+ * Save SVC context
+ */
+ ldr r3, .Lsaved_context_r13_svc
+ stmia r3, {r13-r14}
+ ldr r3, .Lsaved_spsr_svc
+ mrs r1, spsr
+ str r1, [r3]
+
+ bl swsusp_save
+
+ /*
+ * Restore return address
+ */
+ ldr r3, .Lsaved_context_r14_svc
+ ldr lr, [r3]
+ mov pc, lr
+ENDPROC(swsusp_arch_suspend)
+
+ENTRY(swsusp_arch_resume)
+ /*
+ * restore_pblist is the starting point of the list of pages to restore
+ */
+ ldr r0, .Lrestore_pblist
+ ldr r6, [r0]
+
+.Lcopy_loop:
+ ldr r4, [r6] /* src, i.e. the page's present address */
+ ldr r5, [r6, #4] /* dst, i.e. the page's original address */
+
+ /* No. of entries in one page, where each entry is 4 bytes */
+ mov r9, #1024
+
+.Lcopy_one_page:
+ /*
+ * This loop could be optimized by using stm and ldm.
+ */
+ ldr r8, [r4], #4
+ str r8, [r5], #4
+ subs r9, r9, #1
+ bne .Lcopy_one_page
+
+ /*
+ * The last field of struct pbe is a pointer to the next pbe structure
+ */
+ ldr r6, [r6, #8]
+ cmp r6, #0
+ bne .Lcopy_loop
+
+ /*
+ * Restore SVC context
+ */
+ ldr r3, .Lsaved_context_r13_svc
+ ldmia r3, {r13-r14}
+ ldr r3, .Lsaved_spsr_svc
+ ldr r1, [r3]
+ msr spsr_cxsf, r1
+
+ mrs r0, cpsr /* Save current mode into r0 */
+
+ /*
+ * Change to system(user) mode
+ */
+ mov r1, r0
+ orr r1, r1, #0x1f
+ msr cpsr_c, r1
+
+ /*
+ * Restore User context
+ */
+ ldr r3, .Lsaved_context_r0
+ ldmia r3, {r0-r14}
+ ldr r3, .Lsaved_cpsr
+ ldr r1, [r3]
+ msr cpsr_cxsf, r1
+
+ msr cpsr_c, r0 /* Restore original mode from r0 */
+
+ /*
+ * Flush TLB (Invalidate unified TLB unlocked entries)
+ */
+ mov r1, #0
+ mcr p15, 0, r1, c8, c7, 0
+
+ /* Set the return value */
+ mov r0, #0
+
+ /* Restore return address */
+ ldr r3, .Lsaved_context_r14_svc
+ ldr lr, [r3]
+ mov pc, lr
+ENDPROC(swsusp_arch_resume)
+ .align 4
+.Lsaved_context_r0: .long saved_context_r0
+.Lsaved_cpsr: .long saved_cpsr
+.Lsaved_context_r13_svc: .long saved_context_r13_svc
+.Lsaved_context_r14_svc: .long saved_context_r14_svc
+.Lsaved_spsr_svc: .long saved_spsr_svc
+.Lrestore_pblist: .long restore_pblist
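
The copy loop in swsusp_arch_resume walks restore_pblist and copies each 4 KB page (1024 words) back to where it originally lived. The same walk in C, under the struct pbe layout the ldr offsets 0/4/8 above assume:

    #include <string.h>

    #define PAGE_SIZE 4096          /* 1024 words of 4 bytes, as in the loop */

    /* Layout assumed by the assembly: src at offset 0, dst at
     * offset 4, next-pointer as the last field. */
    struct pbe {
            void *address;          /* page's present (source) address */
            void *orig_address;     /* page's original (destination) address */
            struct pbe *next;
    };

    static void copy_restore_pblist(struct pbe *restore_pblist)
    {
            struct pbe *p;

            for (p = restore_pblist; p; p = p->next)
                    memcpy(p->orig_address, p->address, PAGE_SIZE);
    }

Given the list built by the hibernation core, copy_restore_pblist() performs what .Lcopy_loop does one word at a time (the assembly notes the loop could equally use stm/ldm).
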
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
index 0f107dc..136e837 100644
--- a/arch/arm/kernel/leds.c
+++ b/arch/arm/kernel/leds.c
@@ -9,6 +9,8 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
@@ -101,6 +103,25 @@ static struct syscore_ops leds_syscore_ops = {
.resume = leds_resume,
};
+static int leds_idle_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ leds_event(led_idle_start);
+ break;
+ case IDLE_END:
+ leds_event(led_idle_end);
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block leds_idle_nb = {
+ .notifier_call = leds_idle_notifier,
+};
+
static int __init leds_init(void)
{
int ret;
@@ -109,8 +130,12 @@ static int __init leds_init(void)
ret = sysdev_register(&leds_device);
if (ret == 0)
ret = sysdev_create_file(&leds_device, &attr_event);
- if (ret == 0)
+
+ if (ret == 0) {
register_syscore_ops(&leds_syscore_ops);
+ idle_notifier_register(&leds_idle_nb);
+ }
+
return ret;
}
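
leds.c now drives the idle LED from an idle notifier instead of having cpu_idle() call leds_event() directly. A minimal, userspace-compilable sketch of the notifier-chain pattern relied on here; the real kernel uses its notifier infrastructure, and all names below are simplified stand-ins:

    #include <stdio.h>

    enum { IDLE_START, IDLE_END };

    struct notifier_block;
    typedef int (*notifier_fn_t)(struct notifier_block *nb,
                                 unsigned long val, void *data);

    struct notifier_block {
            notifier_fn_t notifier_call;
            struct notifier_block *next;
    };

    static struct notifier_block *idle_chain;

    static void idle_notifier_register(struct notifier_block *nb)
    {
            nb->next = idle_chain;          /* push onto the chain */
            idle_chain = nb;
    }

    static void idle_notifier_call_chain(unsigned long val)
    {
            struct notifier_block *nb;

            for (nb = idle_chain; nb; nb = nb->next)
                    nb->notifier_call(nb, val, NULL);
    }

    static int leds_idle(struct notifier_block *nb, unsigned long val,
                         void *data)
    {
            printf(val == IDLE_START ? "led: idle start\n"
                                     : "led: idle end\n");
            return 0;
    }

    static struct notifier_block leds_nb = { .notifier_call = leds_idle };

    int main(void)
    {
            idle_notifier_register(&leds_nb);
            idle_notifier_call_chain(IDLE_START);   /* as cpu_idle() does */
            idle_notifier_call_chain(IDLE_END);
            return 0;
    }
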
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 2b5b142..3132699 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -662,6 +662,12 @@ init_hw_perf_events(void)
case 0xC090: /* Cortex-A9 */
armpmu = armv7_a9_pmu_init();
break;
+ case 0xC050: /* Cortex-A5 */
+ armpmu = armv7_a5_pmu_init();
+ break;
+ case 0xC0F0: /* Cortex-A15 */
+ armpmu = armv7_a15_pmu_init();
+ break;
}
/* Intel CPUs [xscale]. */
} else if (0x69 == implementor) {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4372763..462aefb 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,17 +17,23 @@
*/
#ifdef CONFIG_CPU_V7
-/* Common ARMv7 event types */
+/*
+ * Common ARMv7 event types
+ *
+ * Note: An implementation may not be able to count all of these events
+ * but the encodings are considered to be `reserved' in the case that
+ * they are not available.
+ */
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV7_PERFCTR_IFETCH_MISS = 0x01,
ARMV7_PERFCTR_ITLB_MISS = 0x02,
- ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
- ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
+ ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */
+ ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */
ARMV7_PERFCTR_DTLB_REFILL = 0x05,
ARMV7_PERFCTR_DREAD = 0x06,
ARMV7_PERFCTR_DWRITE = 0x07,
-
+ ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV7_PERFCTR_EXC_TAKEN = 0x09,
ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV7_PERFCTR_CID_WRITE = 0x0B,
@@ -39,21 +45,30 @@ enum armv7_perf_types {
*/
ARMV7_PERFCTR_PC_WRITE = 0x0C,
ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
+ ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
+
+ /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
-
- ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
+ ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
+ ARMV7_PERFCTR_MEM_ACCESS = 0x13,
+ ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
+ ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
+ ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16,
+ ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17,
+ ARMV7_PERFCTR_L2_DCACHE_WB = 0x18,
+ ARMV7_PERFCTR_BUS_ACCESS = 0x19,
+ ARMV7_PERFCTR_MEMORY_ERROR = 0x1A,
+ ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
+ ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
+ ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
- ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
-
- ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
-
ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
@@ -138,6 +153,39 @@ enum armv7_a9_perf_types {
ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
};
+/* ARMv7 Cortex-A5 specific event types */
+enum armv7_a5_perf_types {
+ ARMV7_PERFCTR_IRQ_TAKEN = 0x86,
+ ARMV7_PERFCTR_FIQ_TAKEN = 0x87,
+
+ ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
+ ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
+ ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
+ ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
+ ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
+ ARMV7_PERFCTR_READ_ALLOC = 0xc5,
+
+ ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
+};
+
+/* ARMv7 Cortex-A15 specific event types */
+enum armv7_a15_perf_types {
+ ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40,
+ ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41,
+ ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42,
+ ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43,
+
+ ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C,
+ ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D,
+
+ ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50,
+ ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51,
+ ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52,
+ ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53,
+
+ ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76,
+};
+
/*
* Cortex-A8 HW events mapping
*
@@ -207,11 +255,6 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
},
[C(DTLB)] = {
- /*
- * Only ITLB misses and DTLB refills are supported.
- * If users want the DTLB refills misses a raw counter
- * must be used.
- */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
@@ -323,11 +366,6 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
},
[C(DTLB)] = {
- /*
- * Only ITLB misses and DTLB refills are supported.
- * If users want the DTLB refills misses a raw counter
- * must be used.
- */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
@@ -374,6 +412,242 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
};
/*
+ * Cortex-A5 HW events mapping
+ */
+static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_PREFETCH_LINEFILL,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ /*
+ * The prefetch counters don't differentiate between the I
+ * side and the D side.
+ */
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_PREFETCH_LINEFILL,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
+ * Cortex-A15 HW events mapping
+ */
+static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+};
+
+static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ /*
+ * Not all performance counters differentiate between read
+ * and write accesses/misses so we're not always strictly
+ * correct, but it's the best we can do. Writes and reads get
+ * combined in these cases.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
* Perf Events counters
*/
enum armv7_counters {
@@ -905,6 +1179,26 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
+
+static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+{
+ armv7pmu.id = ARM_PERF_PMU_ID_CA5;
+ armv7pmu.name = "ARMv7 Cortex-A5";
+ armv7pmu.cache_map = &armv7_a5_perf_cache_map;
+ armv7pmu.event_map = &armv7_a5_perf_map;
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
+ return &armv7pmu;
+}
+
+static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+{
+ armv7pmu.id = ARM_PERF_PMU_ID_CA15;
+ armv7pmu.name = "ARMv7 Cortex-A15";
+ armv7pmu.cache_map = &armv7_a15_perf_cache_map;
+ armv7pmu.event_map = &armv7_a15_perf_map;
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
+ return &armv7pmu;
+}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
@@ -915,4 +1209,14 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
+
+static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+{
+ return NULL;
+}
+
+static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+{
+ return NULL;
+}
#endif /* CONFIG_CPU_V7 */
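
The bulk of this hunk is the two new [cache][op][result] lookup tables. A compact sketch of how such a table is indexed; the enum names and event values below are illustrative A15-style stand-ins, not the kernel's definitions:

    #include <stdio.h>

    #define CACHE_OP_UNSUPPORTED 0xffff

    enum { C_L1D, C_MAX };
    enum { OP_READ, OP_WRITE, OP_MAX };
    enum { RES_ACCESS, RES_MISS, RES_MAX };

    /* (cache, op, result) -> hardware event encoding */
    static const unsigned cache_map[C_MAX][OP_MAX][RES_MAX] = {
            [C_L1D] = {
                    [OP_READ]  = { [RES_ACCESS] = 0x40, [RES_MISS] = 0x42 },
                    [OP_WRITE] = { [RES_ACCESS] = 0x41, [RES_MISS] = 0x43 },
            },
    };

    int main(void)
    {
            unsigned ev = cache_map[C_L1D][OP_READ][RES_MISS];

            if (ev == CACHE_OP_UNSUPPORTED)
                    printf("event unsupported\n");
            else
                    printf("hw event encoding: 0x%02x\n", ev);  /* 0x42 */
            return 0;
    }

Entries left as CACHE_OP_UNSUPPORTED make the perf core reject that generic event, which is why the tables above mark prefetch and LL slots that the PMU cannot count.
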
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 74ae833..e5cfa6a 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -30,9 +30,9 @@
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
+#include <linux/console.h>
#include <asm/cacheflush.h>
-#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/thread_notify.h>
@@ -62,6 +62,18 @@ static volatile int hlt_counter;
#include <mach/system.h>
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+ smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+ dump_stack();
+}
+#endif
+
void disable_hlt(void)
{
hlt_counter++;
@@ -91,8 +103,37 @@ static int __init hlt_setup(char *__unused)
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+ printk("\n");
+ pr_emerg("Restarting %s\n", linux_banner);
+ if (console_trylock()) {
+ console_unlock();
+ return;
+ }
+
+ mdelay(50);
+
+ local_irq_disable();
+ if (!console_trylock())
+ pr_emerg("arm_restart: Console was locked! Busting\n");
+ else
+ pr_emerg("arm_restart: Console was locked!\n");
+ console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
void arm_machine_restart(char mode, const char *cmd)
{
+ /*
+ * Flush the console to make sure all the relevant messages make it
+ * out to the console drivers.
+ */
+ arm_machine_flush_console();
+
/* Disable interrupts first */
local_irq_disable();
local_fiq_disable();
@@ -182,8 +223,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
+ idle_notifier_call_chain(IDLE_START);
tick_nohz_stop_sched_tick(1);
- leds_event(led_idle_start);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
@@ -210,8 +251,8 @@ void cpu_idle(void)
local_irq_enable();
}
}
- leds_event(led_idle_end);
tick_nohz_restart_sched_tick();
+ idle_notifier_call_chain(IDLE_END);
preempt_enable_no_resched();
schedule();
preempt_disable();
@@ -254,6 +295,77 @@ void machine_restart(char *cmd)
arm_pm_restart(reboot_mode, cmd);
}
+/*
+ * dump a block of kernel memory from around the given address
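+ *
+ * Each output line shows the low 16 bits of the address followed by
+ * eight hex words; "********" marks words that could not be read.
+ * Illustrative output only:
+ *
+ *   89a0  e59f0014 e08f0000 ******** e12fff1e ...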
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+ int i, j;
+ int nlines;
+ u32 *p;
+
+ /*
+ * don't attempt to dump non-kernel addresses or
+ * values that are probably just small negative numbers
+ */
+ if (addr < PAGE_OFFSET || addr > -256UL)
+ return;
+
+ printk("\n%s: %#lx:\n", name, addr);
+
+ /*
+ * round address down to a 32 bit boundary
+ * and always dump a multiple of 32 bytes
+ */
+ p = (u32 *)(addr & ~(sizeof(u32) - 1));
+ nbytes += (addr & (sizeof(u32) - 1));
+ nlines = (nbytes + 31) / 32;
+
+ for (i = 0; i < nlines; i++) {
+ /*
+ * just display low 16 bits of address to keep
+ * each line of the dump < 80 characters
+ */
+ printk("%04lx ", (unsigned long)p & 0xffff);
+ for (j = 0; j < 8; j++) {
+ u32 data;
+ if (probe_kernel_address(p, data)) {
+ printk(" ********");
+ } else {
+ printk(" %08x", data);
+ }
+ ++p;
+ }
+ printk("\n");
+ }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+ mm_segment_t fs;
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+ show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+ show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+ show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+ show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+ show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+ show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+ show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+ show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+ show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+ show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+ show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+ show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+ show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+ show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+ show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+ set_fs(fs);
+}
+
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
@@ -313,6 +425,8 @@ void __show_regs(struct pt_regs *regs)
printk("Control: %08x%s\n", ctrl, buf);
}
#endif
+
+ show_extra_register_data(regs, 128);
}
void show_regs(struct pt_regs * regs)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index acbb447..7cc11c0 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -979,6 +979,10 @@ static const char *hwcap_str[] = {
"neon",
"vfpv3",
"vfpv3d16",
+ "tls",
+ "vfpv4",
+ "idiva",
+ "idivt",
NULL
};
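
The strings above are emitted in the "Features" line of /proc/cpuinfo (one entry per set HWCAP bit), so a core advertising the new capabilities would report something like (illustrative values):

    Features	: swp half thumb fastmult vfp edsp thumbee neon vfpv3 tls vfpv4 idiva idivt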
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e7f92a4..6893ec9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -40,6 +40,8 @@
#include <asm/ptrace.h>
#include <asm/localtimer.h>
+#include <mach/sec_debug.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -53,6 +55,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_CPU_BACKTRACE,
};
int __cpuinit __cpu_up(unsigned int cpu)
@@ -271,6 +274,20 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
}
/*
+ * Skip the delay-loop calibration on secondary CPUs that share a
+ * clock with the primary CPU. Architectures can select
+ * ARCH_SKIP_SECONDARY_CALIBRATE for this; a return of 0 here means
+ * the calibration is skipped.
+ */
+static inline int skip_secondary_calibrate(void)
+{
+#ifdef CONFIG_ARCH_SKIP_SECONDARY_CALIBRATE
+ return 0;
+#else
+ return -ENXIO;
+#endif
+}
+
+/*
 * This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
*/
@@ -301,19 +318,10 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
*/
platform_secondary_init(cpu);
- /*
- * Enable local interrupts.
- */
notify_cpu_starting(cpu);
- local_irq_enable();
- local_fiq_enable();
- /*
- * Setup the percpu timer for this CPU.
- */
- percpu_timer_setup();
-
- calibrate_delay();
+ if (skip_secondary_calibrate())
+ calibrate_delay();
smp_store_cpu_info(cpu);
@@ -323,8 +331,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* before we continue.
*/
set_cpu_online(cpu, true);
- while (!cpu_active(cpu))
- cpu_relax();
+
+ /*
+ * Setup the percpu timer for this CPU.
+ */
+ percpu_timer_setup();
+
+ local_irq_enable();
+ local_fiq_enable();
/*
* OK, it's off to the idle thread for us
@@ -405,6 +419,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -445,9 +460,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
static void ipi_timer(void)
{
struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
- irq_enter();
evt->event_handler(evt);
- irq_exit();
}
#ifdef CONFIG_LOCAL_TIMERS
@@ -458,8 +471,13 @@ asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
if (local_timer_ack()) {
__inc_irq_stat(cpu, local_timer_irqs);
+ sec_debug_irq_log(0, do_local_timer, 1);
+ irq_enter();
ipi_timer();
- }
+ irq_exit();
+ sec_debug_irq_log(0, do_local_timer, 2);
+ } else
+ sec_debug_irq_log(0, do_local_timer, 3);
set_irq_regs(old_regs);
}
@@ -551,10 +569,65 @@ static void ipi_cpu_stop(unsigned int cpu)
local_fiq_disable();
local_irq_disable();
+ flush_cache_all();
+ local_flush_tlb_all();
+
while (1)
cpu_relax();
}
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ int i;
+
+ if (test_and_set_bit(0, &backtrace_flag))
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag == 1), don't emit a duplicate set of CPU dumps.
+ */
+ return;
+
+ cpumask_copy(&backtrace_mask, cpu_online_mask);
+ cpu_clear(this_cpu, backtrace_mask);
+
+ pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+ dump_stack();
+
+ pr_info("\nsending IPI to all other CPUs:\n");
+ smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+ /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(&backtrace_mask))
+ break;
+ mdelay(1);
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+ if (cpu_isset(cpu, backtrace_mask)) {
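+ /*
+ * backtrace_lock serializes the dumps so output from different
+ * CPUs does not interleave in the log.
+ */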
+ raw_spin_lock(&backtrace_lock);
+ pr_warning("IPI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ raw_spin_unlock(&backtrace_lock);
+ cpu_clear(cpu, backtrace_mask);
+ }
+}
+
/*
* Main handler for inter-processor interrupts
*/
@@ -566,9 +639,13 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
+ sec_debug_irq_log(ipinr, do_IPI, 1);
+
switch (ipinr) {
case IPI_TIMER:
+ irq_enter();
ipi_timer();
+ irq_exit();
break;
case IPI_RESCHEDULE:
@@ -576,15 +653,25 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
+ irq_enter();
generic_smp_call_function_interrupt();
+ irq_exit();
break;
case IPI_CALL_FUNC_SINGLE:
+ irq_enter();
generic_smp_call_function_single_interrupt();
+ irq_exit();
break;
case IPI_CPU_STOP:
+ irq_enter();
ipi_cpu_stop(cpu);
+ irq_exit();
+ break;
+
+ case IPI_CPU_BACKTRACE:
+ ipi_cpu_backtrace(cpu, regs);
break;
default:
@@ -592,6 +679,9 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
cpu, ipinr);
break;
}
+
+ sec_debug_irq_log(ipinr, do_IPI, 2);
+
set_irq_regs(old_regs);
}
@@ -627,3 +717,13 @@ int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
+
+static void flush_all_cpu_cache(void *info)
+{
+ flush_cache_all();
+}
+
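+/*
+ * Run the flush on every online CPU; the final argument of 1 makes
+ * on_each_cpu() wait until all CPUs have finished before returning.
+ */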
+void flush_all_cpu_caches(void)
+{
+ on_each_cpu(flush_all_cpu_cache, NULL, 1);
+}
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index cb7dd40..1936649 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -15,12 +15,18 @@
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <plat/cpu.h>
+
#define SCU_CTRL 0x00
#define SCU_CONFIG 0x04
#define SCU_CPU_STATUS 0x08
#define SCU_INVALIDATE 0x0c
#define SCU_FPGA_REVISION 0x10
+#ifdef CONFIG_MACH_PX
+extern void logbuf_force_unlock(void);
+#endif
+
/*
* Get the number of CPU cores from the SCU configuration
*/
@@ -33,7 +39,7 @@ unsigned int __init scu_get_core_count(void __iomem *scu_base)
/*
* Enable the SCU
*/
-void __init scu_enable(void __iomem *scu_base)
+void scu_enable(void __iomem *scu_base)
{
u32 scu_ctrl;
@@ -51,6 +57,10 @@ void __init scu_enable(void __iomem *scu_base)
if (scu_ctrl & 1)
return;
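+ /*
+ * Bit 3 of SCU_CTRL is the speculative linefill enable on the
+ * Cortex-A9 SCU (per the MPCore TRM; the patch does not say why
+ * only these SoC revisions get it).
+ */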
+ if ((soc_is_exynos4412() && (samsung_rev() >= EXYNOS4412_REV_1_0)) ||
+ soc_is_exynos4210())
+ scu_ctrl |= (1<<3);
+
scu_ctrl |= 1;
__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
@@ -59,6 +69,10 @@ void __init scu_enable(void __iomem *scu_base)
* initialised is visible to the other CPUs.
*/
flush_cache_all();
+
+#ifdef CONFIG_MACH_PX
+ logbuf_force_unlock();
+#endif
}
/*
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 381d23a..b3337e7 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -94,20 +94,19 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
if (tsk != current) {
#ifdef CONFIG_SMP
/*
- * What guarantees do we have here that 'tsk' is not
- * running on another CPU? For now, ignore it as we
- * can't guarantee we won't explode.
+ * What guarantees do we have here that 'tsk'
+ * is not running on another CPU?
+ *
+ * For now we rely on this function being used only by
+ * latencytop, where 'tsk' is known to be blocked.
*/
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
- return;
-#else
+ /* BUG(); */
+#endif
data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(tsk);
-#endif
} else {
register unsigned long current_sp asm ("sp");
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6807cb1..56b2715 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -451,7 +451,9 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
if (end > vma->vm_end)
end = vma->vm_end;
- flush_cache_user_range(vma, start, end);
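+ /*
+ * mmap_sem is dropped before the flush (rather than after, as the
+ * old code did): the flush may fault on user pages and can be slow,
+ * so holding the lock across it is avoided. Assumed rationale; the
+ * patch itself does not state one.
+ */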
+ up_read(&mm->mmap_sem);
+ flush_cache_user_range(start, end);
+ return;
}
up_read(&mm->mmap_sem);
}