From 6007ca9964e4607fd5835be19391b1d85ebc07a4 Mon Sep 17 00:00:00 2001
From: Tim Bird
Date: Wed, 2 May 2012 22:55:39 +0100
Subject: ARM: 7410/1: Add extra clobber registers for assembly in kernel_execve

commit e787ec1376e862fcea1bfd523feb7c5fb43ecdb9 upstream.

The inline assembly in kernel_execve() uses r8 and r9.  Since this
code sequence does not return, it usually doesn't matter if the
register clobber list is accurate.  However, I saw a case where a
particular version of gcc used r8 as an intermediate for the value
eventually passed to r9.  Because r8 is used in the inline assembly,
and not mentioned in the clobber list, r9 was set to an incorrect
value.

This resulted in a kernel panic on execution of the first user-space
program in the system.  r9 is used in ret_to_user as the thread_info
pointer, and if it's wrong, bad things happen.

Signed-off-by: Tim Bird
Signed-off-by: Russell King
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/kernel/sys_arm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 62e7c61..0264ab4 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
 		  "Ir" (THREAD_START_SP - sizeof(regs)),
 		  "r" (&regs),
 		  "Ir" (sizeof(regs))
-		: "r0", "r1", "r2", "r3", "ip", "lr", "memory");
+		: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
 
  out:
 	return ret;
--
cgit v1.1

From cb68af469fa453694a03553c6efd0681f2c5264f Mon Sep 17 00:00:00 2001
From: Colin Cross
Date: Sat, 5 May 2012 20:58:13 +0100
Subject: ARM: 7414/1: SMP: prevent use of the console when using idmap_pgd

commit fde165b2a29673aabf18ceff14dea1f1cfb0daad upstream.

Commit 4e8ee7de227e3ab9a72040b448ad728c5428a042 (ARM: SMP: use
idmap_pgd for mapping MMU enable during secondary booting) switched
secondary boot to use idmap_pgd, which is initialized during
early_initcall, instead of a page table initialized during __cpu_up.
This causes idmap_pgd to contain the static mappings but be missing
all dynamic mappings.

If a console is registered that creates a dynamic mapping, the printk
in secondary_start_kernel will trigger a data abort on the missing
mapping before the exception handlers have been initialized, leading
to a hang.  Initial boot is not affected because no consoles have been
registered, and resume is usually not affected because the offending
console is suspended.  Onlining a cpu with hotplug triggers the
problem.

A workaround is to delay the printk in secondary_start_kernel until
after the page tables have been switched back to init_mm.

Signed-off-by: Colin Cross
Signed-off-by: Russell King
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/kernel/smp.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e7f92a4..fea97f6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -279,8 +279,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
 
-	printk("CPU%u: Booted secondary processor\n", cpu);
-
 	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
@@ -292,6 +290,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
 
+	printk("CPU%u: Booted secondary processor\n", cpu);
+
 	cpu_init();
 	preempt_disable();
 	trace_hardirqs_off();
--
cgit v1.1

From 866cd76c93e668389a8edb4eece7b900ac386b5e Mon Sep 17 00:00:00 2001
From: Dima Zavin
Date: Thu, 29 Mar 2012 20:44:06 +0100
Subject: ARM: 7365/1: drop unused parameter from flush_cache_user_range
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 4542b6a0fa6b48d9ae6b41c1efeb618b7a221b2a upstream.

vma isn't used and flush_cache_user_range isn't a standard macro that
is used on several archs with the same prototype.  In fact only
unicore32 has a macro with the same name (with an identical
implementation and no in-tree users).

This is a part of a patch proposed by Dima Zavin (with Message-id:
1272439931-12795-1-git-send-email-dima@android.com) that didn't get
accepted.

Cc: Dima Zavin
Acked-by: Catalin Marinas
Signed-off-by: Uwe Kleine-König
Signed-off-by: Russell King
Cc: Will Deacon
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/kernel/traps.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6807cb1..c886033 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -451,7 +451,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 		if (end > vma->vm_end)
 			end = vma->vm_end;
 
-		flush_cache_user_range(vma, start, end);
+		flush_cache_user_range(start, end);
 	}
 	up_read(&mm->mmap_sem);
 }
--
cgit v1.1

From ab55458eb0cc7f0f23e38ed41828e9b5a5372134 Mon Sep 17 00:00:00 2001
From: Dima Zavin
Date: Mon, 30 Apr 2012 10:26:14 +0100
Subject: ARM: 7409/1: Do not call flush_cache_user_range with mmap_sem held

commit 435a7ef52db7d86e67a009b36cac1457f8972391 upstream.

We can't be holding the mmap_sem while calling flush_cache_user_range
because the flush can fault.  If we fault on a user address, the page
fault handler will try to take mmap_sem again.  Since both places
acquire the read lock, most of the time it succeeds.  However, if
another thread tries to acquire the write lock on the mmap_sem
(e.g. mmap) in between the call to flush_cache_user_range and the
fault, the down_read in do_page_fault will deadlock.

[will: removed drop of vma parameter as already queued by rmk (7365/1)]

Acked-by: Catalin Marinas
Signed-off-by: Dima Zavin
Signed-off-by: John Stultz
Signed-off-by: Will Deacon
Signed-off-by: Russell King
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/kernel/traps.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c886033..56b2715 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -451,7 +451,9 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 		if (end > vma->vm_end)
 			end = vma->vm_end;
 
+		up_read(&mm->mmap_sem);
 		flush_cache_user_range(start, end);
+		return;
 	}
 	up_read(&mm->mmap_sem);
 }
--
cgit v1.1

From d1877392b4dce5d4c74d329777980d2a3ccede99 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 19 Jan 2012 15:20:58 +0000
Subject: ARM: fix rcu stalls on SMP platforms

commit 7deabca0acfe02b8e18f59a4c95676012f49a304 upstream.

We can stall RCU processing on SMP platforms if a CPU sits in its idle
loop for a long time.  This happens because we don't call irq_enter()
and irq_exit() around generic_smp_call_function_interrupt() and
friends.
Add the necessary calls, and remove the one from within ipi_timer(),
so that they're all in a common place.

Signed-off-by: Russell King
[add irq_enter()/irq_exit() in do_local_timer]
Signed-off-by: UCHINO Satoshi
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/kernel/smp.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index fea97f6..4469924 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -445,9 +445,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 static void ipi_timer(void)
 {
 	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-	irq_enter();
 	evt->event_handler(evt);
-	irq_exit();
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
@@ -458,7 +456,9 @@ asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
 
 	if (local_timer_ack()) {
 		__inc_irq_stat(cpu, local_timer_irqs);
+		irq_enter();
 		ipi_timer();
+		irq_exit();
 	}
 
 	set_irq_regs(old_regs);
@@ -568,7 +568,9 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 
 	switch (ipinr) {
 	case IPI_TIMER:
+		irq_enter();
 		ipi_timer();
+		irq_exit();
 		break;
 
 	case IPI_RESCHEDULE:
@@ -576,15 +578,21 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 		break;
 
 	case IPI_CALL_FUNC:
+		irq_enter();
 		generic_smp_call_function_interrupt();
+		irq_exit();
 		break;
 
 	case IPI_CALL_FUNC_SINGLE:
+		irq_enter();
 		generic_smp_call_function_single_interrupt();
+		irq_exit();
 		break;
 
 	case IPI_CPU_STOP:
+		irq_enter();
 		ipi_cpu_stop(cpu);
+		irq_exit();
 		break;
 
 	default:
--
cgit v1.1

From 387373e6448530b13056d4e32dfe2a5bf15c9aaf Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 19 Oct 2012 17:53:01 +0100
Subject: ARM: 7559/1: smp: switch away from the idmap before updating init_mm.mm_count

commit 5f40b909728ad784eb43aa309d3c4e9bdf050781 upstream.

When booting a secondary CPU, the primary CPU hands two sets of page
tables via the secondary_data struct:

	(1) swapper_pg_dir: a normal, cacheable, shared (if SMP) mapping
	    of the kernel image (i.e. the tables used by init_mm).

	(2) idmap_pgd: an uncached mapping of the .idmap.text ELF
	    section.

The idmap is generally used when enabling and disabling the MMU, which
includes early CPU boot.  In this case, the secondary CPU switches to
swapper as soon as it enters C code:

	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);

This causes a problem on ARMv7, where the identity mapping is treated
as strongly-ordered leading to architecturally UNPREDICTABLE behaviour
of exclusive accesses, such as those used by atomic_inc.

This patch re-orders the secondary_start_kernel function so that we
switch to swapper before performing any exclusive accesses.
Reported-by: Gilles Chanteperdrix
Cc: David McKay
Signed-off-by: Will Deacon
Signed-off-by: Russell King
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/kernel/smp.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 4469924..511eb03 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -277,18 +277,24 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 asmlinkage void __cpuinit secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	/*
+	 * The identity mapping is uncached (strongly ordered), so
+	 * switch away from it before attempting any exclusive accesses.
+	 */
+	cpu_switch_mm(mm->pgd, mm);
+	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
 
 	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
+	cpu = smp_processor_id();
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
-	cpu_switch_mm(mm->pgd, mm);
-	enter_lazy_tlb(mm, current);
-	local_flush_tlb_all();
 
 	printk("CPU%u: Booted secondary processor\n", cpu);
--
cgit v1.1

From 0b6916a71e85cd2e51c7cc3062814374a42753b9 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sun, 16 Dec 2012 00:25:57 +0000
Subject: ARM: missing ->mmap_sem around find_vma() in swp_emulate.c

commit 7bf9b7bef881aac820bf1f2e9951a17b09bd7e04 upstream.

find_vma() is *not* safe when somebody else is removing vmas.  Not just
the return value might get bogus just as you are getting it (this
instance doesn't try to dereference the resulting vma), the search
itself can get buggered in rather spectacular ways.  IOW, ->mmap_sem
really, really is not optional here.

Signed-off-by: Al Viro
Signed-off-by: Russell King
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/kernel/swp_emulate.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5..0951a32 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -108,10 +108,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
 {
 	siginfo_t info;
 
+	down_read(&current->mm->mmap_sem);
 	if (find_vma(current->mm, addr) == NULL)
 		info.si_code = SEGV_MAPERR;
 	else
 		info.si_code = SEGV_ACCERR;
+	up_read(&current->mm->mmap_sem);
 
 	info.si_signo = SIGSEGV;
 	info.si_errno = 0;
--
cgit v1.1
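
A note on the clobber-list bug fixed by ARM 7410/1 above: GCC only
preserves values across an asm statement for registers that appear as
operands or in the clobber list, so an asm body that scribbles on an
unlisted register can silently corrupt a compiler-held temporary.  The
snippet below is a minimal, hypothetical sketch of the broken and fixed
forms on 32-bit ARM; it is not taken from the kernel, and the function
names and choice of r8 are illustrative only.

	/* Broken: the asm body uses r8 but does not declare it, so the
	 * compiler may keep a live value in r8 across the statement and
	 * read garbage afterwards.
	 */
	static inline unsigned int scratch_r8_broken(unsigned int x)
	{
		unsigned int out;
		asm volatile("mov	r8, %1\n\t"
			     "add	%0, r8, #1"
			     : "=r" (out)
			     : "r" (x));	/* BUG: no "r8" clobber */
		return out;
	}

	/* Fixed: every register the asm touches is listed as clobbered,
	 * mirroring the r8/r9 additions in the patch above.
	 */
	static inline unsigned int scratch_r8_fixed(unsigned int x)
	{
		unsigned int out;
		asm volatile("mov	r8, %1\n\t"
			     "add	%0, r8, #1"
			     : "=r" (out)
			     : "r" (x)
			     : "r8");
		return out;
	}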
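
The locking rule behind ARM 7409/1 and the swp_emulate fix above is the
same: hold mmap_sem only for the vma lookup, and drop it before any
operation that can fault back into the same mm, since do_page_fault
takes mmap_sem again and a queued writer in between turns that into a
deadlock.  The following is a condensed sketch of the shape do_cache_op
ends up with after the two patches; it is simplified and renamed, not
the literal kernel source, and assumes the usual kernel headers
(<linux/mm.h>, <asm/cacheflush.h>).

	static void do_cache_op_sketch(unsigned long start, unsigned long end)
	{
		struct mm_struct *mm = current->active_mm;
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, start);
		if (vma && vma->vm_start < end) {
			if (start < vma->vm_start)
				start = vma->vm_start;
			if (end > vma->vm_end)
				end = vma->vm_end;

			/* Drop the lock first: the flush may fault on the
			 * user range, and the fault handler will call
			 * down_read(&mm->mmap_sem) again.
			 */
			up_read(&mm->mmap_sem);
			flush_cache_user_range(start, end);
			return;
		}
		up_read(&mm->mmap_sem);
	}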