From 71efb063f4a145ae420be054f5a91dcf7c19b375 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 18 Feb 2011 16:21:06 +0100
Subject: ARM: 6742/1: pmu: avoid setting IRQ affinity on UP systems

Now that we can execute a CONFIG_SMP kernel on a uniprocessor system,
extra care has to be taken in the PMU IRQ affinity setting code to
ensure that we don't always fail to initialise.

This patch changes the CPU PMU initialisation code so that when we have
only a single IRQ, whose affinity cannot be changed at the controller,
we report success (0) rather than -EINVAL.

Reported-by: Avik Sil
Acked-by: Jamie Iles
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/pmu.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96e..2c79eec 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
 		   irq, cpu);
 	return err;
 #else
-	return 0;
+	return -EINVAL;
 #endif
 }
 
 static int
 init_cpu_pmu(void)
 {
-	int i, err = 0;
+	int i, irqs, err = 0;
 	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
 
-	if (!pdev) {
-		err = -ENODEV;
-		goto out;
-	}
+	if (!pdev)
+		return -ENODEV;
+
+	irqs = pdev->num_resources;
+
+	/*
+	 * If we have a single PMU interrupt that we can't shift, assume that
+	 * we're running on a uniprocessor machine and continue.
+	 */
+	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+		return 0;
 
-	for (i = 0; i < pdev->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
 		err = set_irq_affinity(platform_get_irq(pdev, i), i);
 		if (err)
 			break;
 	}
 
-out:
 	return err;
 }
--
cgit v1.1

From 315cfe7835c9a3fe27f15519bdeee8bf0a293e33 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 15 Feb 2011 18:06:57 +0100
Subject: ARM: 6676/1: Correct the cpu_architecture() function for ARMv7

If ID_MMFR0[3:0] >= 3, the architecture version is ARMv7. The code was
only testing for ID_MMFR0[3:0] == 3.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/setup.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6..5ea4fb7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
 		asm("mrc	p15, 0, %0, c0, c1, 4"
 		    : "=r" (mmfr0));
-		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-		    (mmfr0 & 0x000000f0) == 0x00000030)
+		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 			 (mmfr0 & 0x000000f0) == 0x00000020)
--
cgit v1.1
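A note on the test above: ID_MMFR0 bits [3:0] report VMSA support and
bits [7:4] report PMSA support, and a value of 3 or more in either
field indicates an ARMv7-class MMU. The following stand-alone C sketch
(not kernel code; the sample register values are hypothetical stand-ins
for what the mrc instruction would return) shows why the relaxed
comparison matters:

    #include <stdio.h>

    /*
     * Stand-alone sketch of the ID_MMFR0 test patched above.  In the
     * kernel, mmfr0 is read with "mrc p15, 0, %0, c0, c1, 4"; the
     * values passed in main() below are made-up examples.
     */
    static int is_armv7(unsigned int mmfr0)
    {
        /* VMSA support in bits [3:0], PMSA support in bits [7:4] */
        return (mmfr0 & 0x0000000f) >= 0x00000003 ||
               (mmfr0 & 0x000000f0) >= 0x00000030;
    }

    int main(void)
    {
        printf("%d\n", is_armv7(0x00000003)); /* VMSAv7: 1 (old test also 1) */
        printf("%d\n", is_armv7(0x00000004)); /* later VMSAv7 variant: 1 (old test 0) */
        printf("%d\n", is_armv7(0x00000002)); /* VMSAv6: 0 under both tests */
        return 0;
    }

With the original equality test, a core reporting a VMSA field greater
than 3 would fall through to the ARMv6 checks and be misidentified.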
From dc810efb0ca5702c9d96782b99282d4b4383e877 Mon Sep 17 00:00:00 2001
From: Pawel Moll
Date: Wed, 16 Feb 2011 18:54:01 +0100
Subject: ARM: 6740/1: Place the notes section correctly in the linker script

Commit 18991197b4b588255ccabf472ebc84db7b66a19c added the --build-id
linker option when the toolchain supports it. The ARM toolchain does,
but for some reason places the section at 0 when the linker script
doesn't mention it explicitly. Commit
1e621a8e3752367d4aae78a8ab00a18fb2793f34 worked around the problem by
removing this section from the binary image with explicit objcopy
options, but it still exists in vmlinux, confusing tools like debuggers
and perf.

This problem was discussed here:

http://lists.infradead.org/pipermail/linux-arm-kernel/2010-May/015994.html

but the proposed changes to the linker script were substantial. This
patch simply places NOTES (36 bytes long, at least when compiled with
the CodeSourcery toolchain) between the data and bss sections, which
seems to be the right place (and the one suggested by the sample linker
script in include/asm-generic/vmlinux.lds.h). It is enough to place the
section correctly in vmlinux (so debuggers are happy):

  Section Headers:
    [11] .data             PROGBITS        c07ce000 7ce000 020fc0 00  WA  0   0 32
    [12] .notes            NOTE            c07eefc0 7eefc0 000024 00  AX  0   0  4
    [13] .bss              NOBITS          c07ef000 7eefe4 01e628 00  WA  0   0 32

  Program Headers:
    LOAD           0x008000 0xc0008000 0xc0008000 0x7e6fe4 0x805628 RWE 0x8000
    NOTE           0x7eefc0 0xc07eefc0 0xc07eefc0 0x00024 0x00024 R E 0x4

  Section to Segment mapping:
    Segment Sections...
     00     <...> .data .notes .bss
     01     .notes

and to get it exposed as /sys/kernel/notes, used by the perf tools.

Signed-off-by: Pawel Moll
Signed-off-by: Russell King
---
 arch/arm/kernel/vmlinux.lds.S | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3..558bd81 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -247,6 +247,8 @@ SECTIONS
 	}
 #endif
 
+	NOTES
+
 	BSS_SECTION(0, 0, 0)
 	_end = .;
--
cgit v1.1

From 53399053eb505cf541b2405bd9d9bca5ecfb96fb Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 20 Feb 2011 12:22:52 +0000
Subject: ARM: Ensure predictable endian state on signal handler entry

Ensure a predictable endian state when entering signal handlers. This
avoids programs which use SETEND to momentarily switch their endian
state from having their signal handlers entered with an unpredictable
endian state.

Cc:
Acked-by: Dave Martin
Signed-off-by: Russell King
---
 arch/arm/kernel/signal.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a6..abaf844 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	unsigned long handler = (unsigned long)ka->sa.sa_handler;
 	unsigned long retcode;
 	int thumb = 0;
-	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;
 
 	/*
 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
--
cgit v1.1
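To see what the setup_return() change does to the saved PSR, consider
this hedged user-space sketch. The PSR_f (0xff000000) and PSR_E_BIT
(bit 9, the CPSR.E data-endianness bit) values mirror the kernel's
definitions; PSR_ENDSTATE is assumed here to be 0, as for a
little-endian kernel (a BE8 kernel would define it as PSR_E_BIT):

    #include <stdio.h>

    /* Assumed values for the example; the kernel defines these per
     * architecture/configuration. */
    #define PSR_f        0xff000000u /* condition flags field */
    #define PSR_E_BIT    0x00000200u /* CPSR.E: data endianness */
    #define PSR_ENDSTATE 0u          /* 0 for LE; PSR_E_BIT for BE8 */

    /* Mirror of the CPSR fix-up now done on signal handler entry. */
    static unsigned int signal_entry_cpsr(unsigned int cpsr)
    {
        /* Drop the flags and whatever endian state SETEND left... */
        cpsr &= ~(PSR_f | PSR_E_BIT);
        /* ...and force the endianness the kernel was built for. */
        return cpsr | PSR_ENDSTATE;
    }

    int main(void)
    {
        /* A user task executed SETEND BE, then took a signal: */
        unsigned int cpsr = 0x60000210;
        printf("0x%08x\n", signal_entry_cpsr(cpsr)); /* prints 0x00000010 */
        return 0;
    }

Without the fix, the handler would start executing with CPSR.E still
set, i.e. with big-endian data accesses it never asked for.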
From a9ad21fed09cb95d34af9474be0831525b30c4c6 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 21 Feb 2011 10:13:36 +0000
Subject: ARM: Keep exit text/data around for SMP_ON_UP

When SMP_ON_UP is used and the spinlocks are inlined, we end up with
inline spinlocks in the exit code, with references from the SMP
alternatives section to the exit sections. This causes link time
errors. Avoid this by placing the exit sections in the init-discarded
region.

Cc:
Tested-by: Dave Martin
Signed-off-by: Russell King
---
 arch/arm/kernel/vmlinux.lds.S | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 558bd81..6146279 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
 #define ARM_CPU_KEEP(x)
 #endif
 
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)	x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
@@ -43,6 +49,7 @@ SECTIONS
 		_sinittext = .;
 			HEAD_TEXT
 			INIT_TEXT
+			ARM_EXIT_KEEP(EXIT_TEXT)
 		_einittext = .;
 		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
@@ -67,6 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
 #endif
 	}
@@ -162,6 +170,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 	INIT_DATA
+	ARM_EXIT_KEEP(EXIT_DATA)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 #endif
--
cgit v1.1

From 5a5af730536fbf15fc354980cba2a0400afa6b76 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 21 Feb 2011 04:37:20 +0100
Subject: ARM: 6745/1: kprobes insn decoding fix

Marcin Slusarz says:

> In arch/arm/kernel/kprobes-decode.c there's a function
> arm_kprobe_decode_insn which does:
>
> 	} else if ((insn & 0x0e000000) == 0x0c400000) {
> 	...
>
> This is always false, so the code below is dead.
> I found this bug by coccinelle (http://coccinelle.lip6.fr/).

Reported-by: Marcin Slusarz
Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/kernel/kprobes-decode.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f005..8f6ed43 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
 		return space_cccc_1100_010x(insn, asi);
 
-	} else if ((insn & 0x0e000000) == 0x0c400000) {
+	} else if ((insn & 0x0e000000) == 0x0c000000) {
 
 		return space_cccc_110x(insn, asi);
--
cgit v1.1

From 868d172b8ac23070418ec6265195e88e8d5dbe92 Mon Sep 17 00:00:00 2001
From: Eric Cooper
Date: Wed, 2 Feb 2011 17:16:09 -0500
Subject: [ARM] add machine-specific hook to machine_kexec

Provide the option to call a machine-specific function before kexec'ing
a new kernel.

Signed-off-by: Eric Cooper
Signed-off-by: Nicolas Pitre
---
 arch/arm/kernel/machine_kexec.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 30ead13..e59bbd4 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -75,6 +75,11 @@ void machine_crash_shutdown(struct pt_regs *regs)
 	printk(KERN_INFO "Loading crashdump kernel...\n");
 }
 
+/*
+ * Function pointer to optional machine-specific reinitialization
+ */
+void (*kexec_reinit)(void);
+
 void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list;
@@ -104,6 +109,8 @@ void machine_kexec(struct kimage *image)
 	       (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
 
 	printk(KERN_INFO "Bye!\n");
+	if (kexec_reinit)
+		kexec_reinit();
 	local_irq_disable();
 	local_fiq_disable();
 	setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
--
cgit v1.1
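The kexec_reinit hook in the last patch is an instance of a common
kernel idiom: a NULL-by-default global function pointer that platform
code may assign, invoked only when set. A minimal stand-alone sketch of
the idiom (everything except the kexec_reinit name is invented for
illustration):

    #include <stdio.h>

    /* Optional hook, NULL unless platform code installs one. */
    void (*kexec_reinit)(void);

    /* Hypothetical platform callback. */
    static void my_board_reinit(void)
    {
        puts("resetting board-specific hardware before kexec");
    }

    /* Condensed stand-in for machine_kexec(): call the hook if set. */
    static void machine_kexec_sketch(void)
    {
        puts("Bye!");
        if (kexec_reinit)
            kexec_reinit();
        /* ...IRQs/FIQs would be disabled and the new kernel entered here... */
    }

    int main(void)
    {
        machine_kexec_sketch();          /* no hook installed: just "Bye!" */
        kexec_reinit = my_board_reinit;  /* platform registers its hook */
        machine_kexec_sketch();
        return 0;
    }

The NULL check keeps the default behaviour unchanged for machines that
do not need any extra work before the new kernel is entered.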