author     Anton Blanchard <anton@samba.org>  2005-09-06 13:05:58 +1000
committer  Paul Mackerras <paulus@samba.org>  2005-09-06 16:07:53 +1000
commit     b2c0ab17ba751abe13a28508b1ac7e9ca074cd87 (patch)
tree       08abcde7f48b3393afcae069e3668923ee3e4492 /include/asm-ppc64
parent     4721e2214b5fd6eca48caea76afb1bad3148930f (diff)
[PATCH] ppc64: speedup cmpxchg
cmpxchg has the following code:

	__typeof__(*(ptr)) _o_ = (o);
	__typeof__(*(ptr)) _n_ = (n);

Unfortunately it makes gcc 4.0 store and load the variables to the
stack. Eg in atomic_dec_and_test we get:

	stw r10,112(r1)
	stw r9,116(r1)
	lwz r9,112(r1)
	lwz r0,116(r1)

x86 just casts the values instead, so do the same here. Also change
__xchg* and __cmpxchg* to take unsigned values, removing a few sign
extensions.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
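For illustration, here is a minimal user-space sketch of the two macro styles the message contrasts. It is not part of the patch: my_cmpxchg32 is a hypothetical plain-C stand-in for the kernel's lwarx/stwcx.-based __cmpxchg, and only the macro shapes mirror the change.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's __cmpxchg dispatcher
 * (the real one is lwarx/stwcx. inline assembly). */
static inline unsigned long
my_cmpxchg32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned long prev = *p;
	if (prev == old)
		*p = new;
	return prev;
}

/* Old style: __typeof__ temporaries, which gcc 4.0 spilled to the stack. */
#define cmpxchg_old(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) my_cmpxchg32((ptr), (unsigned long)_o_,	\
					  (unsigned long)_n_);		\
})

/* New style: cast the arguments directly, as x86 does. */
#define cmpxchg_new(ptr,o,n)						\
	((__typeof__(*(ptr))) my_cmpxchg32((ptr), (unsigned long)(o),	\
					   (unsigned long)(n)))

int main(void)
{
	unsigned int v = 2;

	/* swap 2 -> 1; cmpxchg returns the previous value */
	unsigned int prev = cmpxchg_new(&v, 2, 1);
	printf("prev=%u now=%u\n", prev, v);	/* prev=2 now=1 */
	return 0;
}

Compiling both variants with the affected compiler (e.g. gcc -O2 -S) and diffing the assembly is one way to reproduce the stw/lwz stack traffic described above; modern compilers typically generate identical code for the two forms.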
Diffstat (limited to 'include/asm-ppc64')
-rw-r--r--  include/asm-ppc64/system.h | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index b9e1835..c039642 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -158,7 +158,7 @@ static inline int __is_processor(unsigned long pv)
  * is more like most of the other architectures.
  */
 static __inline__ unsigned long
-__xchg_u32(volatile int *m, unsigned long val)
+__xchg_u32(volatile unsigned int *m, unsigned long val)
 {
 	unsigned long dummy;
 
@@ -200,7 +200,7 @@ __xchg_u64(volatile long *m, unsigned long val)
 extern void __xchg_called_with_bad_pointer(void);
 
 static __inline__ unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
 	case 4:
@@ -223,7 +223,7 @@ __xchg(volatile void *ptr, unsigned long x, int size)
 #define __HAVE_ARCH_CMPXCHG 1
 
 static __inline__ unsigned long
-__cmpxchg_u32(volatile int *p, int old, int new)
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 {
 	unsigned int prev;
 
@@ -271,7 +271,8 @@ __cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
 extern void __cmpxchg_called_with_bad_pointer(void);
 
 static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+	  unsigned int size)
 {
 	switch (size) {
 	case 4:
@@ -283,13 +284,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 	return old;
 }
 
-#define cmpxchg(ptr,o,n)						 \
-  ({									 \
-     __typeof__(*(ptr)) _o_ = (o);					 \
-     __typeof__(*(ptr)) _n_ = (n);					 \
-     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
-				    (unsigned long)_n_, sizeof(*(ptr))); \
-  })
+#define cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+	(unsigned long)(n),sizeof(*(ptr))))
 
 /*
  * We handle most unaligned accesses in hardware. On the other hand