author		Russell King <rmk+kernel@arm.linux.org.uk>	2010-12-22 22:46:24 +0000
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-12-22 22:46:24 +0000
commit		25cf0398bdf365d027e171116aa4a281e9cd3c1c (patch)
tree		a8be3d3d314625a376be0e62c67809dc22274dd4 /arch/arm/kernel
parent		9326845f45650f6af9953a4b6a31e89b54fab82f (diff)
parent		59bdd133561a432c4655146e283caf85fa64c2fb (diff)
Merge branch 'devel' of git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6 into devel-stable
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile           |  1
-rw-r--r--  arch/arm/kernel/entry-armv.S       |  2
-rw-r--r--  arch/arm/kernel/head.S             |  7
-rw-r--r--  arch/arm/kernel/iwmmxt.S           | 55
-rw-r--r--  arch/arm/kernel/pj4-cp0.c          | 94
-rw-r--r--  arch/arm/kernel/relocate_kernel.S  |  2
6 files changed, 147 insertions(+), 14 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 679851a..c73abe4 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -51,6 +51,7 @@ AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
+obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 34bbef0..36199ff 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -915,7 +915,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
* A special ghost syscall is used for that (see traps.c).
*/
stmfd sp!, {r7, lr}
- ldr r7, =1f @ it's 20 bits
+ ldr r7, 1f @ it's 20 bits
swi __ARM_NR_cmpxchg
ldmfd sp!, {r7, pc}
1: .word __ARM_NR_cmpxchg
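
The hunk above changes how the pre-ARMv6 fallback of __kuser_cmpxchg loads the ghost syscall number. For context, userspace reaches this helper through the fixed vector-page address 0xffff0fc0; a minimal caller, following the documented kuser-helper ABI (the atomic_add_one() wrapper below is illustrative, not kernel code), looks roughly like this:

/* Sketch of a userspace caller of the kuser cmpxchg helper touched above.
 * Address and prototype follow the ARM kuser helper ABI; the helper
 * returns 0 when *ptr was updated from oldval to newval.
 */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

static int atomic_add_one(volatile int *counter)
{
	int old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (kuser_cmpxchg(old, new, counter) != 0);	/* retry on contention */

	return new;
}
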
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index dd6b369..6bd82d2 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -85,9 +85,11 @@ ENTRY(stext)
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
+ THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p @ yes, error 'p'
bl __lookup_machine_type @ r5=machinfo
movs r8, r5 @ invalid machine (r5=0)?
+ THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_a @ yes, error 'a'
bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
@@ -262,6 +264,7 @@ __create_page_tables:
mov pc, lr
ENDPROC(__create_page_tables)
.ltorg
+ .align
__enable_mmu_loc:
.long .
.long __enable_mmu
@@ -282,6 +285,7 @@ ENTRY(secondary_startup)
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
moveq r0, #'p' @ yes, error 'p'
+ THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p
/*
@@ -308,6 +312,8 @@ ENTRY(__secondary_switched)
b secondary_start_kernel
ENDPROC(__secondary_switched)
+ .align
+
.type __secondary_data, %object
__secondary_data:
.long .
@@ -413,6 +419,7 @@ __fixup_smp_on_up:
mov pc, lr
ENDPROC(__fixup_smp)
+ .align
1: .word .
.word __smpalt_begin
.word __smpalt_end
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index b63b528..7fa3bb0 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -19,6 +19,14 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#if defined(CONFIG_CPU_PJ4)
+#define PJ4(code...) code
+#define XSC(code...)
+#else
+#define PJ4(code...)
+#define XSC(code...) code
+#endif
+
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)
@@ -58,11 +66,17 @@
ENTRY(iwmmxt_task_enable)
- mrc p15, 0, r2, c15, c1, 0
- tst r2, #0x3 @ CP0 and CP1 accessible?
+ XSC(mrc p15, 0, r2, c15, c1, 0)
+ PJ4(mrc p15, 0, r2, c1, c0, 2)
+ @ CP0 and CP1 accessible?
+ XSC(tst r2, #0x3)
+ PJ4(tst r2, #0xf)
movne pc, lr @ if so no business here
- orr r2, r2, #0x3 @ enable access to CP0 and CP1
- mcr p15, 0, r2, c15, c1, 0
+ @ enable access to CP0 and CP1
+ XSC(orr r2, r2, #0x3)
+ XSC(mcr p15, 0, r2, c15, c1, 0)
+ PJ4(orr r2, r2, #0xf)
+ PJ4(mcr p15, 0, r2, c1, c0, 2)
ldr r3, =concan_owner
add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area
@@ -179,17 +193,26 @@ ENTRY(iwmmxt_task_disable)
teqne r1, r2 @ or specified one?
bne 1f @ no: quit
- mrc p15, 0, r4, c15, c1, 0
- orr r4, r4, #0x3 @ enable access to CP0 and CP1
- mcr p15, 0, r4, c15, c1, 0
+ @ enable access to CP0 and CP1
+ XSC(mrc p15, 0, r4, c15, c1, 0)
+ XSC(orr r4, r4, #0xf)
+ XSC(mcr p15, 0, r4, c15, c1, 0)
+ PJ4(mrc p15, 0, r4, c1, c0, 2)
+ PJ4(orr r4, r4, #0x3)
+ PJ4(mcr p15, 0, r4, c1, c0, 2)
+
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
bl concan_save
- bic r4, r4, #0x3 @ disable access to CP0 and CP1
- mcr p15, 0, r4, c15, c1, 0
+ @ disable access to CP0 and CP1
+ XSC(bic r4, r4, #0x3)
+ XSC(mcr p15, 0, r4, c15, c1, 0)
+ PJ4(bic r4, r4, #0xf)
+ PJ4(mcr p15, 0, r4, c1, c0, 2)
+
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
@@ -277,8 +300,11 @@ ENTRY(iwmmxt_task_restore)
*/
ENTRY(iwmmxt_task_switch)
- mrc p15, 0, r1, c15, c1, 0
- tst r1, #0x3 @ CP0 and CP1 accessible?
+ XSC(mrc p15, 0, r1, c15, c1, 0)
+ PJ4(mrc p15, 0, r1, c1, c0, 2)
+ @ CP0 and CP1 accessible?
+ XSC(tst r1, #0x3)
+ PJ4(tst r1, #0xf)
bne 1f @ yes: block them for next task
ldr r2, =concan_owner
@@ -287,8 +313,11 @@ ENTRY(iwmmxt_task_switch)
teq r2, r3 @ next task owns it?
movne pc, lr @ no: leave Concan disabled
-1: eor r1, r1, #3 @ flip Concan access
- mcr p15, 0, r1, c15, c1, 0
+1: @ flip Concan access
+ XSC(eor r1, r1, #0x3)
+ XSC(mcr p15, 0, r1, c15, c1, 0)
+ PJ4(eor r1, r1, #0xf)
+ PJ4(mcr p15, 0, r1, c1, c0, 2)
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return
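
The XSC()/PJ4() pairs above express the same operation against two different control registers: the XScale family gates Concan access through p15, 0, c15, c1, 0, while PJ4 uses the p15, 0, c1, c0, 2 encoding. A rough C-level sketch of the read side for both, in the style of pj4_cp_access_read() from the new file below (the helper names are illustrative, not kernel API):

#include <linux/types.h>

/* Illustrative only; the register encodings are the ones used by the
 * XSC()/PJ4() variants in the hunks above.
 */
static inline u32 xsc_cp_access_read(void)
{
	u32 value;

	/* XScale/XSC3/Mohawk coprocessor access register */
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0" : "=r" (value));
	return value;
}

static inline u32 pj4_cp_access_read(void)
{
	u32 value;

	/* PJ4 coprocessor access register (c1, c0, 2 encoding) */
	__asm__ __volatile__("mrc p15, 0, %0, c1, c0, 2" : "=r" (value));
	return value;
}
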
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
new file mode 100644
index 0000000..a4b1b07
--- /dev/null
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -0,0 +1,94 @@
+/*
+ * linux/arch/arm/kernel/pj4-cp0.c
+ *
+ * PJ4 iWMMXt coprocessor context switching and handling
+ *
+ * Copyright (c) 2010 Marvell International Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/thread_notify.h>
+
+static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+ struct thread_info *thread = t;
+
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+ /*
+ * flush_thread() zeroes thread->fpstate, so no need
+ * to do anything here.
+ *
+ * FALLTHROUGH: Ensure we don't try to overwrite our newly
+ * initialised state information on the first fault.
+ */
+
+ case THREAD_NOTIFY_EXIT:
+ iwmmxt_task_release(thread);
+ break;
+
+ case THREAD_NOTIFY_SWITCH:
+ iwmmxt_task_switch(thread);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block iwmmxt_notifier_block = {
+ .notifier_call = iwmmxt_do,
+};
+
+
+static u32 __init pj4_cp_access_read(void)
+{
+ u32 value;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c1, c0, 2\n\t"
+ : "=r" (value));
+ return value;
+}
+
+static void __init pj4_cp_access_write(u32 value)
+{
+ u32 temp;
+
+ __asm__ __volatile__ (
+ "mcr p15, 0, %1, c1, c0, 2\n\t"
+ "mrc p15, 0, %0, c1, c0, 2\n\t"
+ "mov %0, %0\n\t"
+ "sub pc, pc, #4\n\t"
+ : "=r" (temp) : "r" (value));
+}
+
+
+/*
+ * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+ * switch code handle iWMMXt context switching.
+ */
+static int __init pj4_cp0_init(void)
+{
+ u32 cp_access;
+
+ cp_access = pj4_cp_access_read() & ~0xf;
+ pj4_cp_access_write(cp_access);
+
+ printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
+ elf_hwcap |= HWCAP_IWMMXT;
+ thread_register_notifier(&iwmmxt_notifier_block);
+
+ return 0;
+}
+
+late_initcall(pj4_cp0_init);
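
pj4_cp0_init() clears the CP0/CP1 access bits at boot so that the first iWMMXt instruction from a task traps and the lazy-switch code in iwmmxt.S takes over, and it advertises the unit through elf_hwcap. A minimal userspace probe for that capability bit might look like the following sketch (assumes a libc providing getauxval(), glibc 2.16 or later, and the ARM <asm/hwcap.h> UAPI header for HWCAP_IWMMXT):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* HWCAP_IWMMXT */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* The bit is set by pj4_cp0_init() above (or the XScale equivalent). */
	printf("iWMMXt %sreported in AT_HWCAP\n",
	       (hwcap & HWCAP_IWMMXT) ? "" : "not ");
	return 0;
}
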
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index fd26f8d..9cf4cbf 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -59,6 +59,8 @@ relocate_new_kernel:
ldr r2,kexec_boot_atags
mov pc,lr
+ .align
+
.globl kexec_start_address
kexec_start_address:
.long 0x0