From 5756e9dd0de6d5c307773f8f734c0684b3098fdd Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Wed, 26 Jan 2011 18:34:26 +0100 Subject: ARM: 6640/1: Thumb-2: Symbol manipulation macros for function body copying In low-level board support code, there is sometimes a need to copy a function body to another location at run-time. A straightforward call to memcpy doesn't work in Thumb-2, because bit 0 of external Thumb function symbols is set to 1, indicating that the function is Thumb. Without corrective measures, this will cause an off-by-one copy, and the copy may be called using the wrong instruction set. This patch adds an fncpy() macro to help with such copies. Particular care is needed, because C doesn't guarantee any defined behaviour when casting a function pointer to any other type. This has been observed to lead to strange optimisation side-effects when doing the arithmetic required to copy/move function bodies correctly in Thumb-2. Thanks to Russell King and Nicolas Pitre for their input on this patch. Signed-off-by: Dave Martin Tested-by: Jean Pihet Tested-by: Tony Lindgren Tested-by: Kevin Hilman Signed-off-by: Russell King --- arch/arm/include/asm/fncpy.h | 94 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 arch/arm/include/asm/fncpy.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h new file mode 100644 index 0000000..de53547 --- /dev/null +++ b/arch/arm/include/asm/fncpy.h @@ -0,0 +1,94 @@ +/* + * arch/arm/include/asm/fncpy.h - helper macros for function body copying + * + * Copyright (C) 2011 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * These macros are intended for use when there is a need to copy a low-level + * function body into special memory. + * + * For example, when reconfiguring the SDRAM controller, the code doing the + * reconfiguration may need to run from SRAM. + * + * NOTE: the copied function body must be entirely self-contained and + * position-independent in order for this to work properly. + * + * NOTE: in order for embedded literals and data to get referenced correctly, + * the alignment of functions must be preserved when copying. To ensure this, + * the source and destination addresses for fncpy() must be aligned to a + * multiple of 8 bytes: you will get a BUG() if this condition is not met. + * You will typically need a ".align 3" directive in the assembler where the + * function to be copied is defined, and ensure that your allocator for the + * destination buffer returns 8-byte-aligned pointers. + * + * Typical usage example: + * + * extern int f(args); + * extern uint32_t size_of_f; + * int (*copied_f)(args); + * void *sram_buffer; + * + * copied_f = fncpy(sram_buffer, &f, size_of_f); + * + * ... later, call the function: ... 
+ * + * copied_f(args); + * + * The size of the function to be copied can't be determined from C: + * this must be determined by other means, such as adding assembler directives + * in the file where f is defined. + */ + +#ifndef __ASM_FNCPY_H +#define __ASM_FNCPY_H + +#include <linux/types.h> +#include <linux/string.h> + +#include <asm/bug.h> +#include <asm/cacheflush.h> + +/* + * Minimum alignment requirement for the source and destination addresses + * for function copying. + */ +#define FNCPY_ALIGN 8 + +#define fncpy(dest_buf, funcp, size) ({ \ + uintptr_t __funcp_address; \ + typeof(funcp) __result; \ + \ + asm("" : "=r" (__funcp_address) : "0" (funcp)); \ + \ + /* \ + * Ensure alignment of source and destination addresses, \ + * disregarding the function's Thumb bit: \ + */ \ + BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \ + (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \ + \ + memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \ + flush_icache_range((unsigned long)(dest_buf), \ + (unsigned long)(dest_buf) + (size)); \ + \ + asm("" : "=r" (__result) \ + : "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \ + \ + __result; \ +}) + +#endif /* !__ASM_FNCPY_H */ -- cgit v1.1 From 6323f0ccedf756dfe5f46549cec69a2d6d97937b Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 16 Jan 2011 18:02:17 +0000 Subject: ARM: bitops: switch set/clear/change bitops to use ldrex/strex Switch the set/clear/change bitops to use the word-based exclusive operations, which are present in a wider range of ARM architectures than the byte-based exclusive operations. Test record: - Nicolas Pitre: ext3,rw,le - Sourav Poddar: nfs,le - Will Deacon: ext3,rw,le - Tony Lindgren: ext3+nfs,le Reviewed-by: Nicolas Pitre Tested-by: Sourav Poddar Tested-by: Will Deacon Tested-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/include/asm/bitops.h | 60 ++++++++++++++++--------------------------- 1 file changed, 22 insertions(+), 38 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 7b1bb2b..af54ed1 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -149,14 +149,18 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) */ /* + * Native endian assembly bitops. nr = 0 -> word 0 bit 0. + */ +extern void _set_bit(int nr, volatile unsigned long * p); +extern void _clear_bit(int nr, volatile unsigned long * p); +extern void _change_bit(int nr, volatile unsigned long * p); +extern int _test_and_set_bit(int nr, volatile unsigned long * p); +extern int _test_and_clear_bit(int nr, volatile unsigned long * p); +extern int _test_and_change_bit(int nr, volatile unsigned long * p); + +/* * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. */ -extern void _set_bit_le(int nr, volatile unsigned long * p); -extern void _clear_bit_le(int nr, volatile unsigned long * p); -extern void _change_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); extern int _find_first_zero_bit_le(const void * p, unsigned size); extern int _find_next_zero_bit_le(const void * p, int size, int offset); extern int _find_first_bit_le(const unsigned long *p, unsigned size); @@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); /* * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. 
*/ -extern void _set_bit_be(int nr, volatile unsigned long * p); -extern void _clear_bit_be(int nr, volatile unsigned long * p); -extern void _change_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_change_bit_be(int nr, volatile unsigned long * p); extern int _find_first_zero_bit_be(const void * p, unsigned size); extern int _find_next_zero_bit_be(const void * p, int size, int offset); extern int _find_first_bit_be(const unsigned long *p, unsigned size); @@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); /* * The __* form of bitops are non-atomic and may be reordered. */ -#define ATOMIC_BITOP_LE(name,nr,p) \ - (__builtin_constant_p(nr) ? \ - ____atomic_##name(nr, p) : \ - _##name##_le(nr,p)) - -#define ATOMIC_BITOP_BE(name,nr,p) \ - (__builtin_constant_p(nr) ? \ - ____atomic_##name(nr, p) : \ - _##name##_be(nr,p)) +#define ATOMIC_BITOP(name,nr,p) \ + (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p)) #else -#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p) -#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p) +#define ATOMIC_BITOP(name,nr,p) _##name(nr,p) #endif -#define NONATOMIC_BITOP(name,nr,p) \ - (____nonatomic_##name(nr, p)) +/* + * Native endian atomic definitions. + */ +#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p) +#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p) +#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p) +#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p) +#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p) +#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p) #ifndef __ARMEB__ /* * These are the little endian, atomic definitions. */ -#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) -#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) -#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) -#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) -#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) -#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_le(p,sz) @@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); #define WORD_BITOFF_TO_LE(x) ((x)) #else - /* * These are the big endian, atomic definitions. 
*/ -#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p) -#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p) -#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p) -#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) -#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) -#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_be(p,sz) -- cgit v1.1 From 000d9c78eb5cd7f18e3d6a381d66e606bc9b8196 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 15 Jan 2011 16:22:12 +0000 Subject: ARM: v6k: remove CPU_32v6K dependencies in asm/spinlock.h SMP requires at least the ARMv6K extensions to be present, so if we're running on SMP, the WFE and SEV instructions must be available. However, when we run on UP, the v6K extensions may not be available, and so we don't want WFE/SEV to be in the instruction stream. Use the SMP alternatives infrastructure to replace these instructions with NOPs if we build for SMP but run on UP. Tested-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/spinlock.h | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 17eb355..da1af52 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,17 +5,36 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +/* + * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K + * extensions, so when running on UP, we have to patch these instructions away. + */ +#define ALT_SMP(smp, up) \ + "9998: " smp "\n" \ + " .pushsection \".alt.smp.init\", \"a\"\n" \ + " .long 9998b\n" \ + " " up "\n" \ + " .popsection\n" + +#ifdef CONFIG_THUMB2_KERNEL +#define SEV ALT_SMP("sev.w", "nop.w") +#define WFE(cond) ALT_SMP("wfe" cond ".w", "nop.w") +#else +#define SEV ALT_SMP("sev", "nop") +#define WFE(cond) ALT_SMP("wfe" cond, "nop") +#endif + static inline void dsb_sev(void) { #if __LINUX_ARM_ARCH__ >= 7 __asm__ __volatile__ ( "dsb\n" - "sev" + SEV ); -#elif defined(CONFIG_CPU_32v6K) +#else __asm__ __volatile__ ( "mcr p15, 0, %0, c7, c10, 4\n" - "sev" + SEV : : "r" (0) ); #endif @@ -46,9 +65,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ldrex %0, [%1]\n" " teq %0, #0\n" -#ifdef CONFIG_CPU_32v6K -" wfene\n" -#endif + WFE("ne") " strexeq %0, %2, [%1]\n" " teqeq %0, #0\n" " bne 1b" @@ -107,9 +124,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) __asm__ __volatile__( "1: ldrex %0, [%1]\n" " teq %0, #0\n" -#ifdef CONFIG_CPU_32v6K -" wfene\n" -#endif + WFE("ne") " strexeq %0, %2, [%1]\n" " teq %0, #0\n" " bne 1b" @@ -176,9 +191,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) "1: ldrex %0, [%2]\n" " adds %0, %0, #1\n" " strexpl %1, %0, [%2]\n" -#ifdef CONFIG_CPU_32v6K -" wfemi\n" -#endif + WFE("mi") " rsbpls %0, %1, #0\n" " bmi 1b" : "=&r" (tmp), "=&r" (tmp2) -- cgit v1.1 From e399b1a4e1d205bdc816cb550d2064f2eb1ddc4c Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 15:08:32 +0000 Subject: ARM: v6k: introduce CPU_V6K option Introduce a CPU_V6K configuration option for platforms to select if they have a V6K CPU core. 
This allows us to identify whether we need to support ARMv6 CPUs without the V6K SMP extensions at build time. Currently CPU_V6K is just an alias for CPU_V6, and all places which reference CPU_V6 are replaced by (CPU_V6 || CPU_V6K). Select CPU_V6K from platforms which are known to be V6K-only. Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 5 +++-- arch/arm/include/asm/proc-fns.h | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 3acd8fa..7d0614f 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -116,7 +116,7 @@ # define MULTI_CACHE 1 #endif -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) //# ifdef _CACHE # define MULTI_CACHE 1 //# else @@ -316,7 +316,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, * Optimized __flush_icache_all for the common cases. Note that UP ARMv7 * will fall through to use __flush_icache_all_generic. */ -#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \ +#if (defined(CONFIG_CPU_V7) && \ + (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \ defined(CONFIG_SMP_ON_UP) #define __flush_icache_preferred __cpuc_flush_icache_all #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8fdae9b..296ca47 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -231,7 +231,7 @@ # endif #endif -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) # ifdef CPU_NAME # undef MULTI_CPU # define MULTI_CPU -- cgit v1.1 From 4ed67a53591db641543d57f31c182591a429dc93 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 15:42:42 +0000 Subject: ARM: v6k: select cmpxchg code sequences according to V6 variants If CONFIG_CPU_V6 is enabled, we must avoid the byte/halfword/doubleword exclusive operations, which aren't implemented before V6K. Use the generic versions (or omit them) instead. If CONFIG_CPU_V6 is not set, but CONFIG_CPU_32v6K is enabled, we have the K extensions, so use these new instructions. 
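For example (an illustrative sketch, not taken from the patch; the variables are hypothetical), the same call sites end up on different code paths depending on the build:

	unsigned char byte_val = 0;
	unsigned int word_val = 0;

	/* 1- and 2-byte widths fall back to __cmpxchg_local_generic()
	 * when CONFIG_CPU_V6 is set, since ldrexb/strexb need V6K: */
	cmpxchg_local(&byte_val, 0, 1);

	/* 4-byte widths always use the ldrex/strex sequence on >= v6: */
	cmpxchg(&word_val, 0, 1);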
Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/system.h | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 97f6d60..9a87823 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -347,6 +347,7 @@ void cpu_idle_wait(void); #include #if __LINUX_ARM_ARCH__ < 6 +/* min ARCH < ARMv6 */ #ifdef CONFIG_SMP #error "SMP is not supported on this platform" @@ -365,7 +366,7 @@ void cpu_idle_wait(void); #include #endif -#else /* __LINUX_ARM_ARCH__ >= 6 */ +#else /* min ARCH >= ARMv6 */ extern void __bad_cmpxchg(volatile void *ptr, int size); @@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long oldval, res; switch (size) { -#ifdef CONFIG_CPU_32v6K +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ case 1: do { asm volatile("@ __cmpxchg1\n" @@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, : "memory", "cc"); } while (res); break; -#endif /* CONFIG_CPU_32v6K */ +#endif case 4: do { asm volatile("@ __cmpxchg4\n" @@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, unsigned long ret; switch (size) { -#ifndef CONFIG_CPU_32v6K +#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ case 1: case 2: ret = __cmpxchg_local_generic(ptr, old, new, size); break; -#endif /* !CONFIG_CPU_32v6K */ +#endif default: ret = __cmpxchg(ptr, old, new, size); } @@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, (unsigned long)(n), \ sizeof(*(ptr)))) -#ifdef CONFIG_CPU_32v6K +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ /* * Note : ARMv7-M (currently unsupported by Linux) does not support @@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, (unsigned long long)(o), \ (unsigned long long)(n))) -#else /* !CONFIG_CPU_32v6K */ +#else /* min ARCH = ARMv6 */ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#endif /* CONFIG_CPU_32v6K */ +#endif #endif /* __LINUX_ARM_ARCH__ >= 6 */ -- cgit v1.1 From 37bc618fe2689a7f8de8fac82e72b00ecea4d43d Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 16:38:56 +0000 Subject: ARM: v6k: select TLS register code according to V6 variants If CONFIG_CPU_V6 is enabled, we may or may not have the TLS register. Use the conditional code which copes with this variability. Otherwise, if CONFIG_CPU_32v6K is set, we know we have the TLS register on all supported CPUs, so use it unconditionally. 
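To make the build-time choice concrete, here is a minimal sketch of the resulting logic (the CPU examples are assumptions, not text from the patch):

	#if defined(CONFIG_CPU_V6)
	/* The kernel may run on a plain V6 core without the TLS register
	 * or on a V6K core that has it: probe the hwcaps at run time. */
	# define has_tls_reg	(elf_hwcap & HWCAP_TLS)
	#elif defined(CONFIG_CPU_32v6K)
	/* Every supported V6K CPU has the TLS register. */
	# define has_tls_reg	1
	#endif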
Acked-by: Nicolas Pitre Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/tls.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index e71d6ff..60843eb 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -28,15 +28,14 @@ #define tls_emu 1 #define has_tls_reg 1 #define set_tls set_tls_none -#elif __LINUX_ARM_ARCH__ >= 7 || \ - (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) -#define tls_emu 0 -#define has_tls_reg 1 -#define set_tls set_tls_v6k -#elif __LINUX_ARM_ARCH__ == 6 +#elif defined(CONFIG_CPU_V6) #define tls_emu 0 #define has_tls_reg (elf_hwcap & HWCAP_TLS) #define set_tls set_tls_v6 +#elif defined(CONFIG_CPU_32v6K) +#define tls_emu 0 +#define has_tls_reg 1 +#define set_tls set_tls_v6k #else #define tls_emu 0 #define has_tls_reg 0 -- cgit v1.1 From 774c096bf9e49eebf7b5d2d9fdddf632c29ccea0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 23 Jan 2011 13:04:53 +0000 Subject: ARM: v6/v7 cache: allow cache calls to be optimized The v6 cache call optimization was disabled to allow the optional block cache operations to be substituted on CPUs which supported those operations. However, as that functionality was removed, we no longer need to prevent this optimization from being used. The v7 cache call optimization was just a copy of the v6, so fix that too. Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 7d0614f..d9b4c42 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -116,20 +116,20 @@ # define MULTI_CACHE 1 #endif -#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) -//# ifdef _CACHE +#if defined(CONFIG_CPU_CACHE_V6) +# ifdef _CACHE # define MULTI_CACHE 1 -//# else -//# define _CACHE v6 -//# endif +# else +# define _CACHE v6 +# endif #endif -#if defined(CONFIG_CPU_V7) -//# ifdef _CACHE +#if defined(CONFIG_CPU_CACHE_V7) +# ifdef _CACHE # define MULTI_CACHE 1 -//# else -//# define _CACHE v7 -//# endif +# else +# define _CACHE v7 +# endif #endif #if !defined(_CACHE) && !defined(MULTI_CACHE) -- cgit v1.1 From 917692f5f7ec63de3b093c825913d68e910db282 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Wed, 9 Feb 2011 12:06:59 +0100 Subject: ARM: 6655/1: Correct WFE() in asm/spinlock.h for Thumb-2 The content for ALT_SMP() in the definition of WFE() expands to 6 bytes (IT cc ; WFEcc.W), which breaks the assumptions of the fixup code, leading to lockups when the affected code gets run. This patch works around the problem by explicitly using an IT + WFEcc.N pair. 
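The size arithmetic, for illustration (a sketch of the Thumb-2 encodings this description assumes):

	/*
	 * "wfene.w" on its own may assemble to:
	 *     it   ne      @ 2 bytes, inserted implicitly by the assembler
	 *     wfene.w      @ 4 bytes
	 * -> 6 bytes, overflowing the 4-byte ALT_SMP() slot.
	 *
	 * An explicit IT plus a narrow WFE assembles to:
	 *     it   ne      @ 2 bytes, left alone by the assembler
	 *     wfene.n      @ 2 bytes
	 * -> 4 bytes, matching the "nop.w" used for the UP replacement.
	 */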
Signed-off-by: Dave Martin Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/spinlock.h | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index da1af52..fdd3820 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -18,7 +18,23 @@ #ifdef CONFIG_THUMB2_KERNEL #define SEV ALT_SMP("sev.w", "nop.w") -#define WFE(cond) ALT_SMP("wfe" cond ".w", "nop.w") +/* + * For Thumb-2, special care is needed to ensure that the conditional WFE + * instruction really does assemble to exactly 4 bytes (as required by + * the SMP_ON_UP fixup code). By itself "wfene" might cause the + * assembler to insert an extra (16-bit) IT instruction, depending on the + * presence or absence of neighbouring conditional instructions. + * + * To avoid this unpredictability, an appropriate IT is inserted explicitly: + * the assembler won't change IT instructions which are explicitly present + * in the input. + */ +#define WFE(cond) ALT_SMP( \ + "it " cond "\n\t" \ + "wfe" cond ".n", \ + \ + "nop.w" \ +) #else #define SEV ALT_SMP("sev", "nop") #define WFE(cond) ALT_SMP("wfe" cond, "nop") -- cgit v1.1 From 292ec42af7c6361435fe9df50cd59ec76f6741c6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 4 Feb 2011 10:36:39 +0000 Subject: ARM: pm: add function to set WFI low-power mode for SMP CPUs Add a function to set the SCU low-power mode for SMP CPUs. This centralizes the functionality rather than having to expose the SCU register definitions to each platform. Signed-off-by: Russell King --- arch/arm/include/asm/smp_scu.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 2376835..4eb6d00 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -1,7 +1,14 @@ #ifndef __ASMARM_ARCH_SCU_H #define __ASMARM_ARCH_SCU_H +#define SCU_PM_NORMAL 0 +#define SCU_PM_DORMANT 2 +#define SCU_PM_POWEROFF 3 + +#ifndef __ASSEMBLER__ unsigned int scu_get_core_count(void __iomem *); void scu_enable(void __iomem *); +int scu_power_mode(void __iomem *, unsigned int); +#endif #endif -- cgit v1.1 From 753790e713d80b50b867fa1ed32ec0eb5e82ae8e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 6 Feb 2011 15:32:24 +0000 Subject: ARM: move cache/processor/fault glue to separate include files This allows the cache/processor/fault glue to be more easily used from assembler code. Tested on Assabet and Tegra 2. 
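As a sketch of how the relocated glue binds names (a hypothetical single-CPU configuration, not part of the patch):

	/* With only CONFIG_CPU_V7 selected, asm/glue-proc.h sets CPU_NAME
	 * to cpu_v7, so the generic entry points paste directly: */
	#define cpu_do_idle	__glue(CPU_NAME,_do_idle)	/* -> cpu_v7_do_idle() */

	/* With several CPU types selected, MULTI_CPU is defined instead,
	 * and the same calls dispatch through the processor struct. */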
Tested-by: Colin Cross Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 133 +---------------- arch/arm/include/asm/cpu-multi32.h | 69 --------- arch/arm/include/asm/cpu-single.h | 44 ------ arch/arm/include/asm/glue-cache.h | 146 ++++++++++++++++++ arch/arm/include/asm/glue-df.h | 110 ++++++++++++++ arch/arm/include/asm/glue-pf.h | 57 +++++++ arch/arm/include/asm/glue-proc.h | 261 ++++++++++++++++++++++++++++++++ arch/arm/include/asm/glue.h | 138 ----------------- arch/arm/include/asm/proc-fns.h | 299 ++++++++----------------------------- 9 files changed, 640 insertions(+), 617 deletions(-) delete mode 100644 arch/arm/include/asm/cpu-multi32.h delete mode 100644 arch/arm/include/asm/cpu-single.h create mode 100644 arch/arm/include/asm/glue-cache.h create mode 100644 arch/arm/include/asm/glue-df.h create mode 100644 arch/arm/include/asm/glue-pf.h create mode 100644 arch/arm/include/asm/glue-proc.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 3acd8fa..18a5664 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -12,7 +12,7 @@ #include -#include +#include #include #include #include @@ -20,123 +20,6 @@ #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) /* - * Cache Model - * =========== - */ -#undef _CACHE -#undef MULTI_CACHE - -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - -#if defined(CONFIG_CPU_CACHE_V4) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v4 -# endif -#endif - -#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ - defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ - defined(CONFIG_CPU_ARM1026) -# define MULTI_CACHE 1 -#endif - -#if defined(CONFIG_CPU_FA526) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE fa -# endif -#endif - -#if defined(CONFIG_CPU_ARM926T) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE arm926 -# endif -#endif - -#if defined(CONFIG_CPU_ARM940T) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE arm940 -# endif -#endif - -#if defined(CONFIG_CPU_ARM946E) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE arm946 -# endif -#endif - -#if defined(CONFIG_CPU_CACHE_V4WB) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v4wb -# endif -#endif - -#if defined(CONFIG_CPU_XSCALE) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE xscale -# endif -#endif - -#if defined(CONFIG_CPU_XSC3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE xsc3 -# endif -#endif - -#if defined(CONFIG_CPU_MOHAWK) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE mohawk -# endif -#endif - -#if defined(CONFIG_CPU_FEROCEON) -# define MULTI_CACHE 1 -#endif - -#if defined(CONFIG_CPU_V6) -//# ifdef _CACHE -# define MULTI_CACHE 1 -//# else -//# define _CACHE v6 -//# endif -#endif - -#if defined(CONFIG_CPU_V7) -//# ifdef _CACHE -# define MULTI_CACHE 1 -//# else -//# define _CACHE v7 -//# endif -#endif - -#if !defined(_CACHE) && !defined(MULTI_CACHE) -#error Unknown cache maintainence model -#endif - -/* * This flag is used to indicate that the page pointed to by a pte is clean * and does not require cleaning before returning it to the user. */ @@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache; * visible to the CPU. 
*/ #define dmac_map_area cpu_cache.dma_map_area -#define dmac_unmap_area cpu_cache.dma_unmap_area +#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_flush_range cpu_cache.dma_flush_range #else -#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) -#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) -#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) -#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) -#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) -#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) -#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) - extern void __cpuc_flush_icache_all(void); extern void __cpuc_flush_kern_all(void); extern void __cpuc_flush_user_all(void); @@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_map_area __glue(_CACHE,_dma_map_area) -#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) -#define dmac_flush_range __glue(_CACHE,_dma_flush_range) - extern void dmac_map_area(const void *, size_t, int); extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_flush_range(const void *, const void *); diff --git a/arch/arm/include/asm/cpu-multi32.h b/arch/arm/include/asm/cpu-multi32.h deleted file mode 100644 index e2b5b0b..0000000 --- a/arch/arm/include/asm/cpu-multi32.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * arch/arm/include/asm/cpu-multi32.h - * - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include - -struct mm_struct; - -/* - * Don't change this structure - ASM code - * relies on it. - */ -extern struct processor { - /* MISC - * get data abort address/flags - */ - void (*_data_abort)(unsigned long pc); - /* - * Retrieve prefetch fault address - */ - unsigned long (*_prefetch_abort)(unsigned long lr); - /* - * Set up any processor specifics - */ - void (*_proc_init)(void); - /* - * Disable any processor specifics - */ - void (*_proc_fin)(void); - /* - * Special stuff for a reset - */ - void (*reset)(unsigned long addr) __attribute__((noreturn)); - /* - * Idle the processor - */ - int (*_do_idle)(void); - /* - * Processor architecture specific - */ - /* - * clean a virtual address range from the - * D-cache without flushing the cache. - */ - void (*dcache_clean_area)(void *addr, int size); - - /* - * Set the page table - */ - void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); - /* - * Set a possibly extended PTE. Non-extended PTEs should - * ignore 'ext'. 
- */ - void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); -} processor; - -#define cpu_proc_init() processor._proc_init() -#define cpu_proc_fin() processor._proc_fin() -#define cpu_reset(addr) processor.reset(addr) -#define cpu_do_idle() processor._do_idle() -#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) -#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) -#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) diff --git a/arch/arm/include/asm/cpu-single.h b/arch/arm/include/asm/cpu-single.h deleted file mode 100644 index f073a6d..0000000 --- a/arch/arm/include/asm/cpu-single.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * arch/arm/include/asm/cpu-single.h - * - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -/* - * Single CPU - */ -#ifdef __STDC__ -#define __catify_fn(name,x) name##x -#else -#define __catify_fn(name,x) name/**/x -#endif -#define __cpu_fn(name,x) __catify_fn(name,x) - -/* - * If we are supporting multiple CPUs, then we must use a table of - * function pointers for this lot. Otherwise, we can optimise the - * table away. - */ -#define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init) -#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin) -#define cpu_reset __cpu_fn(CPU_NAME,_reset) -#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) -#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area) -#define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm) -#define cpu_set_pte_ext __cpu_fn(CPU_NAME,_set_pte_ext) - -#include - -struct mm_struct; - -/* declare all the functions as extern */ -extern void cpu_proc_init(void); -extern void cpu_proc_fin(void); -extern int cpu_do_idle(void); -extern void cpu_dcache_clean_area(void *, int); -extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); -extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); -extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h new file mode 100644 index 0000000..0591d35 --- /dev/null +++ b/arch/arm/include/asm/glue-cache.h @@ -0,0 +1,146 @@ +/* + * arch/arm/include/asm/glue-cache.h + * + * Copyright (C) 1999-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ASM_GLUE_CACHE_H +#define ASM_GLUE_CACHE_H + +#include + +/* + * Cache Model + * =========== + */ +#undef _CACHE +#undef MULTI_CACHE + +#if defined(CONFIG_CPU_CACHE_V3) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v3 +# endif +#endif + +#if defined(CONFIG_CPU_CACHE_V4) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4 +# endif +#endif + +#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ + defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ + defined(CONFIG_CPU_ARM1026) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_FA526) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE fa +# endif +#endif + +#if defined(CONFIG_CPU_ARM926T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm926 +# endif +#endif + +#if defined(CONFIG_CPU_ARM940T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm940 +# endif +#endif + +#if defined(CONFIG_CPU_ARM946E) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm946 +# endif +#endif + +#if defined(CONFIG_CPU_CACHE_V4WB) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4wb +# endif +#endif + +#if defined(CONFIG_CPU_XSCALE) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE xscale +# endif +#endif + +#if defined(CONFIG_CPU_XSC3) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE xsc3 +# endif +#endif + +#if defined(CONFIG_CPU_MOHAWK) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE mohawk +# endif +#endif + +#if defined(CONFIG_CPU_FEROCEON) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_V6) +//# ifdef _CACHE +# define MULTI_CACHE 1 +//# else +//# define _CACHE v6 +//# endif +#endif + +#if defined(CONFIG_CPU_V7) +//# ifdef _CACHE +# define MULTI_CACHE 1 +//# else +//# define _CACHE v7 +//# endif +#endif + +#if !defined(_CACHE) && !defined(MULTI_CACHE) +#error Unknown cache maintainence model +#endif + +#ifndef MULTI_CACHE +#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) +#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) +#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) +#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) +#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) +#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) +#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) + +#define dmac_map_area __glue(_CACHE,_dma_map_area) +#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) +#define dmac_flush_range __glue(_CACHE,_dma_flush_range) +#endif + +#endif diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h new file mode 100644 index 0000000..354d571 --- /dev/null +++ b/arch/arm/include/asm/glue-df.h @@ -0,0 +1,110 @@ +/* + * arch/arm/include/asm/glue-df.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ASM_GLUE_DF_H +#define ASM_GLUE_DF_H + +#include + +/* + * Data Abort Model + * ================ + * + * We have the following to choose from: + * arm6 - ARM6 style + * arm7 - ARM7 style + * v4_early - ARMv4 without Thumb early abort handler + * v4t_late - ARMv4 with Thumb late abort handler + * v4t_early - ARMv4 with Thumb early abort handler + * v5tej_early - ARMv5 with Thumb and Java early abort handler + * xscale - ARMv5 with Thumb with Xscale extensions + * v6_early - ARMv6 generic early abort handler + * v7_early - ARMv7 generic early abort handler + */ +#undef CPU_DABORT_HANDLER +#undef MULTI_DABORT + +#if defined(CONFIG_CPU_ARM610) +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER cpu_arm6_data_abort +# endif +#endif + +#if defined(CONFIG_CPU_ARM710) +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER cpu_arm7_data_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_LV4T +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v4t_late_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV4 +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v4_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV4T +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v4t_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV5TJ +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v5tj_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV5T +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v5t_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV6 +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v6_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV7 +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v7_early_abort +# endif +#endif + +#ifndef CPU_DABORT_HANDLER +#error Unknown data abort handler type +#endif + +#endif diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h new file mode 100644 index 0000000..d385f37 --- /dev/null +++ b/arch/arm/include/asm/glue-pf.h @@ -0,0 +1,57 @@ +/* + * arch/arm/include/asm/glue-pf.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ASM_GLUE_PF_H +#define ASM_GLUE_PF_H + +#include + +/* + * Prefetch Abort Model + * ================ + * + * We have the following to choose from: + * legacy - no IFSR, no IFAR + * v6 - ARMv6: IFSR, no IFAR + * v7 - ARMv7: IFSR and IFAR + */ + +#undef CPU_PABORT_HANDLER +#undef MULTI_PABORT + +#ifdef CONFIG_CPU_PABRT_LEGACY +# ifdef CPU_PABORT_HANDLER +# define MULTI_PABORT 1 +# else +# define CPU_PABORT_HANDLER legacy_pabort +# endif +#endif + +#ifdef CONFIG_CPU_PABRT_V6 +# ifdef CPU_PABORT_HANDLER +# define MULTI_PABORT 1 +# else +# define CPU_PABORT_HANDLER v6_pabort +# endif +#endif + +#ifdef CONFIG_CPU_PABRT_V7 +# ifdef CPU_PABORT_HANDLER +# define MULTI_PABORT 1 +# else +# define CPU_PABORT_HANDLER v7_pabort +# endif +#endif + +#ifndef CPU_PABORT_HANDLER +#error Unknown prefetch abort handler type +#endif + +#endif diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h new file mode 100644 index 0000000..e3bf443 --- /dev/null +++ b/arch/arm/include/asm/glue-proc.h @@ -0,0 +1,261 @@ +/* + * arch/arm/include/asm/glue-proc.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ASM_GLUE_PROC_H +#define ASM_GLUE_PROC_H + +#include + +/* + * Work out if we need multiple CPU support + */ +#undef MULTI_CPU +#undef CPU_NAME + +/* + * CPU_NAME - the prefix for CPU related functions + */ + +#ifdef CONFIG_CPU_ARM610 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm6 +# endif +#endif + +#ifdef CONFIG_CPU_ARM7TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7tdmi +# endif +#endif + +#ifdef CONFIG_CPU_ARM710 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7 +# endif +#endif + +#ifdef CONFIG_CPU_ARM720T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm720 +# endif +#endif + +#ifdef CONFIG_CPU_ARM740T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm740 +# endif +#endif + +#ifdef CONFIG_CPU_ARM9TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm9tdmi +# endif +#endif + +#ifdef CONFIG_CPU_ARM920T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm920 +# endif +#endif + +#ifdef CONFIG_CPU_ARM922T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm922 +# endif +#endif + +#ifdef CONFIG_CPU_FA526 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_fa526 +# endif +#endif + +#ifdef CONFIG_CPU_ARM925T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm925 +# endif +#endif + +#ifdef CONFIG_CPU_ARM926T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm926 +# endif +#endif + +#ifdef CONFIG_CPU_ARM940T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm940 +# endif +#endif + +#ifdef CONFIG_CPU_ARM946E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm946 +# endif +#endif + +#ifdef CONFIG_CPU_SA110 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa110 +# endif 
+#endif + +#ifdef CONFIG_CPU_SA1100 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa1100 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020e +# endif +#endif + +#ifdef CONFIG_CPU_ARM1022 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1022 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1026 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1026 +# endif +#endif + +#ifdef CONFIG_CPU_XSCALE +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xscale +# endif +#endif + +#ifdef CONFIG_CPU_XSC3 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xsc3 +# endif +#endif + +#ifdef CONFIG_CPU_MOHAWK +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_mohawk +# endif +#endif + +#ifdef CONFIG_CPU_FEROCEON +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_feroceon +# endif +#endif + +#ifdef CONFIG_CPU_V6 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v6 +# endif +#endif + +#ifdef CONFIG_CPU_V7 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v7 +# endif +#endif + +#ifndef MULTI_CPU +#define cpu_proc_init __glue(CPU_NAME,_proc_init) +#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) +#define cpu_reset __glue(CPU_NAME,_reset) +#define cpu_do_idle __glue(CPU_NAME,_do_idle) +#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area) +#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm) +#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext) +#endif + +#endif diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h index 234a3fc..0ec35d1 100644 --- a/arch/arm/include/asm/glue.h +++ b/arch/arm/include/asm/glue.h @@ -15,7 +15,6 @@ */ #ifdef __KERNEL__ - #ifdef __STDC__ #define ____glue(name,fn) name##fn #else @@ -23,141 +22,4 @@ #endif #define __glue(name,fn) ____glue(name,fn) - - -/* - * Data Abort Model - * ================ - * - * We have the following to choose from: - * arm6 - ARM6 style - * arm7 - ARM7 style - * v4_early - ARMv4 without Thumb early abort handler - * v4t_late - ARMv4 with Thumb late abort handler - * v4t_early - ARMv4 with Thumb early abort handler - * v5tej_early - ARMv5 with Thumb and Java early abort handler - * xscale - ARMv5 with Thumb with Xscale extensions - * v6_early - ARMv6 generic early abort handler - * v7_early - ARMv7 generic early abort handler - */ -#undef CPU_DABORT_HANDLER -#undef MULTI_DABORT - -#if defined(CONFIG_CPU_ARM610) -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER cpu_arm6_data_abort -# endif -#endif - -#if defined(CONFIG_CPU_ARM710) -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER cpu_arm7_data_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_LV4T -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4t_late_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV4 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV4T -# ifdef CPU_DABORT_HANDLER -# define 
MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4t_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV5TJ -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v5tj_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV5T -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v5t_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV6 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v6_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV7 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v7_early_abort -# endif -#endif - -#ifndef CPU_DABORT_HANDLER -#error Unknown data abort handler type -#endif - -/* - * Prefetch Abort Model - * ================ - * - * We have the following to choose from: - * legacy - no IFSR, no IFAR - * v6 - ARMv6: IFSR, no IFAR - * v7 - ARMv7: IFSR and IFAR - */ - -#undef CPU_PABORT_HANDLER -#undef MULTI_PABORT - -#ifdef CONFIG_CPU_PABRT_LEGACY -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER legacy_pabort -# endif -#endif - -#ifdef CONFIG_CPU_PABRT_V6 -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER v6_pabort -# endif -#endif - -#ifdef CONFIG_CPU_PABRT_V7 -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER v7_pabort -# endif -#endif - -#ifndef CPU_PABORT_HANDLER -#error Unknown prefetch abort handler type -#endif - #endif diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8fdae9b..6980215 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -13,248 +13,77 @@ #ifdef __KERNEL__ +#include +#include -/* - * Work out if we need multiple CPU support - */ -#undef MULTI_CPU -#undef CPU_NAME +#ifndef __ASSEMBLY__ + +struct mm_struct; /* - * CPU_NAME - the prefix for CPU related functions + * Don't change this structure - ASM code relies on it. 
*/ - -#ifdef CONFIG_CPU_ARM610 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm6 -# endif -#endif - -#ifdef CONFIG_CPU_ARM7TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM710 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7 -# endif -#endif - -#ifdef CONFIG_CPU_ARM720T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm720 -# endif -#endif - -#ifdef CONFIG_CPU_ARM740T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm740 -# endif -#endif - -#ifdef CONFIG_CPU_ARM9TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm9tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM920T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm920 -# endif -#endif - -#ifdef CONFIG_CPU_ARM922T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm922 -# endif -#endif - -#ifdef CONFIG_CPU_FA526 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_fa526 -# endif -#endif - -#ifdef CONFIG_CPU_ARM925T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm925 -# endif -#endif - -#ifdef CONFIG_CPU_ARM926T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm926 -# endif -#endif - -#ifdef CONFIG_CPU_ARM940T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm940 -# endif -#endif - -#ifdef CONFIG_CPU_ARM946E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm946 -# endif -#endif - -#ifdef CONFIG_CPU_SA110 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa110 -# endif -#endif - -#ifdef CONFIG_CPU_SA1100 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa1100 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1020 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1020E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020e -# endif -#endif - -#ifdef CONFIG_CPU_ARM1022 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1022 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1026 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1026 -# endif -#endif - -#ifdef CONFIG_CPU_XSCALE -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xscale -# endif -#endif - -#ifdef CONFIG_CPU_XSC3 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xsc3 -# endif -#endif - -#ifdef CONFIG_CPU_MOHAWK -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_mohawk -# endif -#endif - -#ifdef CONFIG_CPU_FEROCEON -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_feroceon -# endif -#endif - -#ifdef CONFIG_CPU_V6 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v6 -# endif -#endif - -#ifdef CONFIG_CPU_V7 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v7 -# endif -#endif - -#ifndef __ASSEMBLY__ +extern struct processor 
{ + /* MISC + * get data abort address/flags + */ + void (*_data_abort)(unsigned long pc); + /* + * Retrieve prefetch fault address + */ + unsigned long (*_prefetch_abort)(unsigned long lr); + /* + * Set up any processor specifics + */ + void (*_proc_init)(void); + /* + * Disable any processor specifics + */ + void (*_proc_fin)(void); + /* + * Special stuff for a reset + */ + void (*reset)(unsigned long addr) __attribute__((noreturn)); + /* + * Idle the processor + */ + int (*_do_idle)(void); + /* + * Processor architecture specific + */ + /* + * clean a virtual address range from the + * D-cache without flushing the cache. + */ + void (*dcache_clean_area)(void *addr, int size); + + /* + * Set the page table + */ + void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); + /* + * Set a possibly extended PTE. Non-extended PTEs should + * ignore 'ext'. + */ + void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); +} processor; #ifndef MULTI_CPU -#include <asm/cpu-single.h> +extern void cpu_proc_init(void); +extern void cpu_proc_fin(void); +extern int cpu_do_idle(void); +extern void cpu_dcache_clean_area(void *, int); +extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); +extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); +extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); #else -#include <asm/cpu-multi32.h> +#define cpu_proc_init() processor._proc_init() +#define cpu_proc_fin() processor._proc_fin() +#define cpu_reset(addr) processor.reset(addr) +#define cpu_do_idle() processor._do_idle() +#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) +#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) +#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) #endif #include -- cgit v1.1 From f4117ac9e237b74afdf5e001d5ea26a4d15e9847 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 18:07:14 +0000 Subject: ARM: P2V: separate PHYS_OFFSET from platform definitions This uncouples PHYS_OFFSET from the platform definitions, thereby facilitating run-time computation of the physical memory offset. Acked-by: Nicolas Pitre Acked-by: Viresh Kumar Acked-by: H Hartley Sweeten Acked-by: Magnus Damm Acked-by: Tony Lindgren Acked-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Wan ZongShun Acked-by: Kukjin Kim Acked-by: Eric Miao Acked-by: Jiandong Zheng Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index d0ee74b..2efec57 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -24,6 +24,8 @@ */ #define UL(x) _AC(x, UL) +#define PHYS_OFFSET PLAT_PHYS_OFFSET + #ifdef CONFIG_MMU /* -- cgit v1.1 From dc21af99fadcfa0ae65b52fd0895f85824f0c288 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:09:43 +0000 Subject: ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching This idea came from Nicolas; Eric Miao produced an initial version, which was then rewritten into this. Patch the physical to virtual translations at runtime. As we modify the code, this makes it incompatible with XIP kernels, but allows us to achieve the translation with minimal loss of performance. As many translations are of the form: physical = virtual + (PHYS_OFFSET - PAGE_OFFSET) virtual = physical - (PHYS_OFFSET - PAGE_OFFSET) we generate an 'add' instruction for __virt_to_phys(), and a 'sub' instruction for __phys_to_virt(). A worked example is sketched below.
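	/*
	 * Worked example (illustrative values, not from the patch): with
	 * PAGE_OFFSET = 0xC0000000 and RAM starting at 0x80000000, the
	 * difference (PHYS_OFFSET - PAGE_OFFSET) is 0xC0000000 mod 2^32,
	 * so the stubs get patched at boot into:
	 *
	 *     add r0, r0, #0xC0000000   @ __virt_to_phys
	 *     sub r0, r0, #0xC0000000   @ __phys_to_virt
	 *
	 * e.g. __virt_to_phys(0xC0001000) = 0xC0001000 + 0xC0000000,
	 * which wraps to 0x80001000.
	 */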
We calculate at run time (PHYS_OFFSET - PAGE_OFFSET) by comparing the address prior to MMU initialization with where it should be once the MMU has been initialized, and place this constant into the above add/sub instructions. Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for the C-mode PHYS_OFFSET variable definition to use. At present, we are unable to support Realview with Sparsemem enabled as this uses a complex mapping function, and MSM as this requires a constant which will not fit in our math instruction. Add a module version magic string for this feature to prevent incompatible modules being loaded. Tested-by: Tony Lindgren Reviewed-by: Nicolas Pitre Tested-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 55 +++++++++++++++++++++++++++++++++---------- arch/arm/include/asm/module.h | 15 ++++++++++-- 2 files changed, 56 insertions(+), 14 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 2efec57..7197879 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -24,8 +24,6 @@ */ #define UL(x) _AC(x, UL) -#define PHYS_OFFSET PLAT_PHYS_OFFSET - #ifdef CONFIG_MMU /* @@ -135,16 +133,6 @@ #endif /* - * Physical vs virtual RAM address space conversion. These are - * private definitions which should NOT be used outside memory.h - * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. - */ -#ifndef __virt_to_phys -#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) -#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) -#endif - -/* * Convert a physical address to a Page Frame Number and back */ #define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) @@ -159,6 +147,49 @@ #ifndef __ASSEMBLY__ /* + * Physical vs virtual RAM address space conversion. These are + * private definitions which should NOT be used outside memory.h + * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. + */ +#ifndef __virt_to_phys +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + +extern unsigned long __pv_phys_offset; +#define PHYS_OFFSET __pv_phys_offset + +#define __pv_stub(from,to,instr) \ + __asm__("@ __pv_stub\n" \ + "1: " instr " %0, %1, %2\n" \ + " .pushsection .pv_table,\"a\"\n" \ + " .long 1b\n" \ + " .popsection\n" \ + : "=r" (to) \ + : "r" (from), "I" (0x81000000)) + +static inline unsigned long __virt_to_phys(unsigned long x) +{ + unsigned long t; + __pv_stub(x, t, "add"); + return t; +} + +static inline unsigned long __phys_to_virt(unsigned long x) +{ + unsigned long t; + __pv_stub(x, t, "sub"); + return t; +} +#else +#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) +#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) +#endif +#endif + +#ifndef PHYS_OFFSET +#define PHYS_OFFSET PLAT_PHYS_OFFSET +#endif + +/* * The DMA mask corresponding to the maximum bus address allocatable * using GFP_DMA. The default here places no restriction on DMA * allocations. This must be the smallest DMA mask in the system, diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 12c8e68..d072c21 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -25,8 +25,19 @@ struct mod_arch_specific { }; /* - * Include the ARM architecture version. 
+ * Add the ARM architecture version to the version magic string
  */
-#define MODULE_ARCH_VERMAGIC "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+	MODULE_ARCH_VERMAGIC_ARMVSN \
+	MODULE_ARCH_VERMAGIC_P2V

 #endif /* _ASM_ARM_MODULE_H */
-- cgit v1.1

From cada3c0841e1deaec4c0f92654610b028dc683ff Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 4 Jan 2011 19:39:29 +0000
Subject: ARM: P2V: extend to 16-bit translation offsets

MSM's memory is aligned to 2MB, which is more than we can do with our
existing method as we're limited to the upper 8 bits.  Extend this by
using two instructions to 16 bits, automatically selected when MSM is
enabled.

Acked-by: Tony Lindgren
Reviewed-by: Nicolas Pitre
Tested-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/include/asm/memory.h | 21 +++++++++++++++++----
 arch/arm/include/asm/module.h | 4 ++++
 2 files changed, 21 insertions(+), 4 deletions(-)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 7197879..2398b3f 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -154,29 +154,42 @@
 #ifndef __virt_to_phys
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT

+/*
+ * Constants used to force the right instruction encodings and shifts
+ * so that all we need to do is modify the 8-bit constant field.
+ */
+#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_23_16 0x00810000
+
 extern unsigned long __pv_phys_offset;
 #define PHYS_OFFSET __pv_phys_offset

-#define __pv_stub(from,to,instr) \
+#define __pv_stub(from,to,instr,type) \
 	__asm__("@ __pv_stub\n" \
 	"1: " instr " %0, %1, %2\n" \
 	" .pushsection .pv_table,\"a\"\n" \
 	" .long 1b\n" \
 	" .popsection\n" \
 	: "=r" (to) \
-	: "r" (from), "I" (0x81000000))
+	: "r" (from), "I" (type))

 static inline unsigned long __virt_to_phys(unsigned long x)
 {
 	unsigned long t;
-	__pv_stub(x, t, "add");
+	__pv_stub(x, t, "add", __PV_BITS_31_24);
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	__pv_stub(t, t, "add", __PV_BITS_23_16);
+#endif
 	return t;
 }

 static inline unsigned long __phys_to_virt(unsigned long x)
 {
 	unsigned long t;
-	__pv_stub(x, t, "sub");
+	__pv_stub(x, t, "sub", __PV_BITS_31_24);
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	__pv_stub(t, t, "sub", __PV_BITS_23_16);
+#endif
 	return t;
 }
 #else

diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index d072c21..a2b775b 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -31,7 +31,11 @@ struct mod_arch_specific {

 /* Add __virt_to_phys patching state as well */
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
+#else
 #define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#endif
 #else
 #define MODULE_ARCH_VERMAGIC_P2V ""
 #endif
-- cgit v1.1

From 3a6b1676c6f27f7fad1a3d6fab5a95f90b1e7402 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 15 Feb 2011 17:28:28 +0100
Subject: ARM: 6675/1: use phys_addr_t instead of unsigned long in conversion code

The unsigned long datatype is not sufficient for mapping physical
addresses >= 4GB.  This patch ensures that the address conversion
code in asm/memory.h casts to the correct type when handling
physical addresses.
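To see why the casts matter, consider a 36-bit physical address (a
sketch with an illustrative address, assuming PAGE_SHIFT == 12):

	phys_addr_t pa = 0x800000000ULL;	/* would be truncated by a 32-bit unsigned long */
	unsigned long pfn = __phys_to_pfn(pa);	/* 0x800000: the PFN itself fits */
	phys_addr_t back = __pfn_to_phys(pfn);	/* the cast preserves bits above 31 */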
The internal v2p macros only deal with lowmem addresses, so these do
not need to be modified.

Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/memory.h | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 2398b3f..431077c 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -15,6 +15,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 

@@ -135,8 +136,8 @@
 /*
  * Convert a physical address to a Page Frame Number and back
  */
-#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
-#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
+#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)

 /*
  * Convert a page to/from a physical address
@@ -234,12 +235,12 @@ static inline unsigned long __phys_to_virt(unsigned long x)
  * translation for translating DMA addresses.  Use the driver
  * DMA support - see dma-mapping.h.
  */
-static inline unsigned long virt_to_phys(const volatile void *x)
+static inline phys_addr_t virt_to_phys(const volatile void *x)
 {
 	return __virt_to_phys((unsigned long)(x));
 }

-static inline void *phys_to_virt(unsigned long x)
+static inline void *phys_to_virt(phys_addr_t x)
 {
 	return (void *)(__phys_to_virt((unsigned long)(x)));
 }
-- cgit v1.1

From f6b0fa02e8b0708d17d631afce456524eadf87ff Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 6 Feb 2011 15:48:39 +0000
Subject: ARM: pm: add generic CPU suspend/resume support

This adds core support for saving and restoring CPU coprocessor
registers for suspend/resume support.  This contains support for
suspend with ARM920, ARM926, SA11x0, PXA25x, PXA27x, PXA3xx, V6 and
V7 CPUs.  Tested on Assabet and Tegra 2.

Tested-by: Colin Cross
Tested-by: Kukjin Kim
Signed-off-by: Russell King
---
 arch/arm/include/asm/glue-proc.h | 3 +++
 arch/arm/include/asm/proc-fns.h  | 7 +++++++
 2 files changed, 10 insertions(+)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index e3bf443..6469521 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -256,6 +256,9 @@
 #define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
 #define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
 #define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
+#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
+#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
+#define cpu_do_resume __glue(CPU_NAME,_do_resume)
 #endif

 #endif

diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 6980215..8ec535e 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -66,6 +66,11 @@ extern struct processor {
 	 * ignore 'ext'.
 	 */
 	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+
+	/* Suspend/resume */
+	unsigned int suspend_size;
+	void (*do_suspend)(void *);
+	void (*do_resume)(void *);
 } processor;

 #ifndef MULTI_CPU
@@ -86,6 +91,8 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
 #endif

+extern void cpu_resume(void);
+
 #include 

 #ifdef CONFIG_MMU
-- cgit v1.1

From 2bbd7e9b74271b2d6a14b4840fc44afbea83774d Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 8 Jan 2011 12:05:09 +0000
Subject: ARM: fix some sparse errors in generic ARM code

arch/arm/kernel/return_address.c:37:6: warning: symbol 'return_address' was not declared. Should it be static?
arch/arm/kernel/setup.c:76:14: warning: symbol 'processor_id' was not declared. Should it be static?
arch/arm/kernel/traps.c:259:1: warning: symbol 'die_lock' was not declared. Should it be static?
arch/arm/vfp/vfpmodule.c:156:6: warning: symbol 'vfp_raise_sigfpe' was not declared. Should it be static?

Signed-off-by: Russell King
---
 arch/arm/include/asm/cputype.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 20ae96c..ed5bc9e 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -23,6 +23,8 @@
 #define CPUID_EXT_ISAR4 "c2, 4"
 #define CPUID_EXT_ISAR5 "c2, 5"

+extern unsigned int processor_id;
+
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg) \
 	({ \
@@ -43,7 +45,6 @@
 		__val; \
 	})
 #else
-extern unsigned int processor_id;
 #define read_cpuid(reg) (processor_id)
 #define read_cpuid_ext(reg) 0
 #endif
-- cgit v1.1

From aaa50048f6ce44af66ce0389d4cc6a8348333271 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 25 Jan 2011 21:35:38 +0100
Subject: ARM: 6639/1: allow highmem on SMP platforms without h/w TLB ops broadcast

In commit e616c591405c168f6dc3dfd1221e105adfe49b8d, highmem support was
deactivated for SMP platforms without hardware TLB ops broadcast because
usage of kmap_high_get() requires that IRQs be disabled when kmap_lock
is locked, which is incompatible with the IPI mechanism used by the
software TLB ops broadcast invoked through flush_all_zero_pkmaps().

The reason for kmap_high_get() is to ensure that the currently kmap'd
page usage count does not decrease to zero while we're using its
existing virtual mapping in an atomic context.  With a VIVT cache this
is essential due to cache coherency issues, but with a VIPT cache it is
only an optimization, so as not to pay the price of establishing a
second mapping if an existing one can be used.  However, on VIPT
platforms without hardware TLB maintenance we can give up on that
optimization in order to be able to use highmem.

From ARMv7 onwards the TLB ops are broadcast in hardware, so let's
disable ARCH_NEEDS_KMAP_HIGH_GET only when CONFIG_SMP and
CONFIG_CPU_TLB_V6 are defined.
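With the NULL-returning stub added below, callers can then use a
single pattern on all configurations (a hedged sketch; the fallback
mapping strategy is left to the caller):

	void *vaddr = kmap_high_get(page);
	if (vaddr) {
		/* reuse the page's existing kmap mapping ... */
		kunmap_high(page);
	} else {
		/* no usable mapping: establish a temporary one instead,
		   e.g. with kmap_atomic(), and tear it down afterwards */
	}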
Signed-off-by: Nicolas Pitre
Tested-by: Saeed Bishara
Signed-off-by: Russell King
---
 arch/arm/include/asm/highmem.h | 29 +++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7080e2c..a4edd19 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -19,11 +19,36 @@

 extern pte_t *pkmap_page_table;

+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+/*
+ * The reason for kmap_high_get() is to ensure that the currently kmap'd
+ * page usage count does not decrease to zero while we're using its
+ * existing virtual mapping in an atomic context.  With a VIVT cache this
+ * is essential to do, but with a VIPT cache this is only an optimization
+ * so not to pay the price of establishing a second mapping if an existing
+ * one can be used.  However, on platforms without hardware TLB maintenance
+ * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
+ * the locking involved must also disable IRQs which is incompatible with
+ * the IPI mechanism used by global TLB operations.
+ */
 #define ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
+#error "The sum of features in your kernel config cannot be supported together"
+#endif
+#endif

-extern void *kmap_high(struct page *page);
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
-extern void kunmap_high(struct page *page);
+#else
+static inline void *kmap_high_get(struct page *page)
+{
+	return NULL;
+}
+#endif

 /*
  * The following functions are already defined by 
-- cgit v1.1

From 425fc47adb5bb69f76285be77a09a3341a30799e Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 14 Feb 2011 14:31:09 +0100
Subject: ARM: 6668/1: ptrace: remove single-step emulation code

PTRACE_SINGLESTEP is a ptrace request designed to offer single-stepping
support to userspace when the underlying architecture has hardware
support for this operation.

On ARM, we set arch_has_single_step() to 1 and attempt to emulate
hardware single-stepping by disassembling the current instruction to
determine the next pc and placing a software breakpoint on that
location.

Unfortunately this has the following problems:

1.) Only a subset of ARMv7 instructions are supported
2.) Thumb-2 is unsupported
3.) The code is not SMP safe

We could try to fix this code, but it turns out that because of the
above issues it is rarely used in practice.  GDB, for example, uses
PTRACE_POKETEXT and PTRACE_PEEKTEXT to manage breakpoints itself and
does not require any kernel assistance.

This patch removes the single-step emulation code from ptrace, meaning
that the PTRACE_SINGLESTEP request will return -EIO on ARM.  Portable
code must check the return value from a ptrace call and handle the
failure gracefully.
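For example, a tracer wanting to keep working on ARM might fall back
along these lines (a sketch, assuming a stopped tracee whose pid is
in 'pid'):

	#include <sys/ptrace.h>
	#include <errno.h>

	if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) == -1 &&
	    errno == EIO) {
		/* no kernel single-step support: fall back to software
		   breakpoints via PTRACE_POKETEXT, as GDB does */
	}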
Acked-by: Nicolas Pitre
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/processor.h | 12 ------------
 arch/arm/include/asm/ptrace.h    | 2 --
 arch/arm/include/asm/traps.h     | 1 +
 3 files changed, 1 insertion(+), 14 deletions(-)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 67357ba..b439b41 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -29,19 +29,7 @@
 #define STACK_TOP_MAX TASK_SIZE
 #endif

-union debug_insn {
-	u32 arm;
-	u16 thumb;
-};
-
-struct debug_entry {
-	u32 address;
-	union debug_insn insn;
-};
-
 struct debug_info {
-	int nsaved;
-	struct debug_entry bp[2];
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
 #endif

diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 783d50f..a8ff22b 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -130,8 +130,6 @@ struct pt_regs {

 #ifdef __KERNEL__

-#define arch_has_single_step() (1)
-
 #define user_mode(regs) \
 	(((regs)->ARM_cpsr & 0xf) == 0)

diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 1b960d5..f90756d 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr)
 extern void __init early_trap_init(void);
 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
+extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);

 extern void *vectors_page;
-- cgit v1.1

From 80f0aad77f3e1e9d9e518b09ac46963d628ae2be Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Fri, 25 Feb 2011 17:54:52 +0100
Subject: ARM: 6766/1: Thumb-2: Reflect ARM/Thumb-2 configuration in module vermagic

Loading Thumb-2 modules into an ARM kernel or vice-versa isn't
guaranteed to work safely, since the kernel is not interworking-aware
everywhere.

This patch adds "thumb2" to the module vermagic when
CONFIG_THUMB2_KERNEL is enabled, to help avoid accidental loading of
modules into the wrong kernel.

Signed-off-by: Dave Martin
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/include/asm/module.h | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index a2b775b..543b449 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -40,8 +40,16 @@ struct mod_arch_specific {
 #define MODULE_ARCH_VERMAGIC_P2V ""
 #endif

+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
 #define MODULE_ARCH_VERMAGIC \
 	MODULE_ARCH_VERMAGIC_ARMVSN \
+	MODULE_ARCH_VERMAGIC_ARMTHUMB \
 	MODULE_ARCH_VERMAGIC_P2V

 #endif /* _ASM_ARM_MODULE_H */
-- cgit v1.1

From 2839e06c95d12ada034cf9b63da60334c7c6358b Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Tue, 8 Mar 2011 06:59:54 +0100
Subject: ARM: 6795/1: l2x0: Errata fix for flush by Way operation can cause data corruption

PL310 implements the Clean & Invalidate by Way L2 cache maintenance
operation (offset 0x7FC).  This operation runs in the background so
that PL310 can handle normal accesses while it is in progress.  Under
very rare circumstances, due to this erratum, write data can be lost
when PL310 treats a cacheable write transaction during a Clean &
Invalidate by Way operation.
Workaround:
- Disable Write-Back and Cache Linefill (Debug Control Register)
- Clean & Invalidate by Way (0x7FC)
- Re-enable Write-Back and Cache Linefill (Debug Control Register)

This patch also removes any OMAP dependency on PL310 errata.

Signed-off-by: Santosh Shilimkar
Acked-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/include/asm/outercache.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index fc19009..348d513 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -31,6 +31,7 @@ struct outer_cache_fns {
 #ifdef CONFIG_OUTER_CACHE_SYNC
 	void (*sync)(void);
 #endif
+	void (*set_debug)(unsigned long);
 };

 #ifdef CONFIG_OUTER_CACHE
-- cgit v1.1

From d7ed36a4ea84e3a850f9932e2058ceef987d1acd Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Wed, 2 Mar 2011 08:03:22 +0100
Subject: ARM: 6777/1: gic: Add hooks for architecture specific extensions

A few architectures combine the GIC with an external interrupt
controller.  On such systems it may be necessary to update both the
GIC registers and the external controller's registers to control IRQ
behavior.  This can be addressed in a couple of ways:

1. Export the common GIC routines along with 'struct irq_chip
   gic_chip' and allow architectures to override individual functions
   with custom versions.

2. Provide architecture-specific function pointer hooks within the
   GIC library and leave platforms to add the necessary code as part
   of these hooks.

The first might be non-intrusive, but it has a few shortcomings: each
architecture needs its own custom GIC library, the locks used should
remain common since they protect the same IRQs, and from a maintenance
point of view it leads to fixes spread across multiple files.  The
second is cleaner and more portable: it ensures that the common GIC
infrastructure is not touched, while still giving architectures a
place to address their specific issues.

Cc: Russell King
Signed-off-by: Santosh Shilimkar
Acked-by: Colin Cross
Tested-by: Colin Cross
Signed-off-by: Russell King
---
 arch/arm/include/asm/hardware/gic.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 84557d3..0691f9d 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -34,6 +34,7 @@
 #ifndef __ASSEMBLY__

 extern void __iomem *gic_cpu_base_addr;
+extern struct irq_chip gic_arch_extn;

 void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
 void gic_secondary_init(unsigned int);
-- cgit v1.1

From 5dab26af1bacad9a7189d904fbc8b4fe8e95dd81 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 4 Mar 2011 12:38:54 +0100
Subject: ARM: 6784/1: errata: no automatic Store Buffer drain on Cortex-A9

On revisions of the Cortex-A9 prior to r2p0, the Store Buffer does not
have any automatic draining mechanism and therefore a livelock may
occur if an external agent continuously polls a memory location
waiting to observe an update.

This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
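A "correctly written" polling loop here is one whose body contains
cpu_relax(), for instance (a minimal sketch; 'flag' stands in for a
location updated by another agent):

	static void wait_for_update(volatile unsigned long *flag)
	{
		while (!*flag)
			cpu_relax();	/* smp_mb() on affected Cortex-A9s */
	}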
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/processor.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 67357ba..7a1f03c 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -95,7 +95,7 @@ extern void release_thread(struct task_struct *);

 unsigned long get_wchan(struct task_struct *p);

-#if __LINUX_ARM_ARCH__ == 6
+#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
 #define cpu_relax() smp_mb()
 #else
 #define cpu_relax() barrier()
-- cgit v1.1

From 23bfdacf4eb525ff3404161429cedaa281c23e47 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 10 Mar 2011 14:03:01 +0100
Subject: ARM: 6798/1: aout-core: zero thread debug registers in a.out core dump

The removal of the single-step emulation from ptrace on ARM means that
thread_struct no longer has software breakpoint fields in its debug
member.  This patch fixes the a.out core dump code so that the debug
registers are zeroed rather than trying to copy from non-existent
fields.

Cc: Nicolas Pitre
Signed-off-by: Bryan Wu
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/a.out-core.h | 6 +-----
 arch/arm/include/asm/user.h       | 2 +-
 2 files changed, 2 insertions(+), 6 deletions(-)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
index 93d04ac..92f10cb 100644
--- a/arch/arm/include/asm/a.out-core.h
+++ b/arch/arm/include/asm/a.out-core.h
@@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	dump->u_ssize = 0;

-	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
-	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
-	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
-	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
-	dump->u_debugreg[4] = tsk->thread.debug.nsaved;
+	memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));

 	if (dump->start_stack < 0x04000000)
 		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index 05ac4b0..35917b3 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -71,7 +71,7 @@ struct user{
 /* the registers. */
 	unsigned long magic;	/* To uniquely identify a core file */
 	char u_comm[32];	/* User command that was responsible */
-	int u_debugreg[8];
+	int u_debugreg[8];	/* No longer used */
 	struct user_fp u_fp;	/* FP state */
 	struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
 				/* the FP registers. */
-- cgit v1.1

From 10a8c3839810ac9af1aec836d61b92e7a879f5fa Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 14 Mar 2011 14:00:30 +0100
Subject: ARM: 6806/1: irq: introduce entry and exit functions for chained handlers

Some chained IRQ handlers are written to cope with primary chips of
potentially different flow types.  Whether this is a sensible thing to
do is a point of contention.

This patch introduces entry/exit functions for chained handlers which
infer the flow type of the primary chip as fasteoi or level-type by
checking whether or not the ->irq_eoi function pointer is present and
calling back to the primary chip as necessary.  Other methods of flow
control are not considered.
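Usage might look as follows in a demultiplexing handler (a
hypothetical example; example_get_pending() stands in for reading the
secondary controller's pending register):

	static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned int cascade_irq;

		chained_irq_enter(chip, desc);

		cascade_irq = example_get_pending();	/* hypothetical */
		generic_handle_irq(cascade_irq);

		chained_irq_exit(chip, desc);
	}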
Acked-by: Thomas Gleixner
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/mach/irq.h | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 22ac140..febe495 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -34,4 +34,35 @@ do {					\
 	raw_spin_unlock(&desc->lock);	\
 } while(0)

+#ifndef __ASSEMBLY__
+/*
+ * Entry/exit functions for chained handlers where the primary IRQ chip
+ * may implement either fasteoi or level-trigger flow control.
+ */
+static inline void chained_irq_enter(struct irq_chip *chip,
+				     struct irq_desc *desc)
+{
+	/* FastEOI controllers require no action on entry. */
+	if (chip->irq_eoi)
+		return;
+
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(&desc->irq_data);
+	} else {
+		chip->irq_mask(&desc->irq_data);
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+	}
+}
+
+static inline void chained_irq_exit(struct irq_chip *chip,
+				    struct irq_desc *desc)
+{
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+	else
+		chip->irq_unmask(&desc->irq_data);
+}
+#endif
+
 #endif
-- cgit v1.1