author    Anton Blanchard <anton@samba.org>  2010-05-31 18:45:11 +0000
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-07-09 11:28:30 +1000
commit    ae01f84b93b274e2f215bdf6d0b46435679b5f9a (patch)
tree      59457aa08f2a2ac53f6d00653a267964568cf427 /arch/powerpc/include/asm/percpu.h
parent    51c7fdba40e741dfe18455b5e4240b70c422bf2e (diff)
powerpc: Optimise per cpu accesses on 64bit
Now that we dynamically allocate the paca array, it takes an extra load whenever we want to access another cpu's paca. One place we do that a lot is per cpu variables. A simple example:

        DEFINE_PER_CPU(unsigned long, vara);

        unsigned long test4(int cpu)
        {
                return per_cpu(vara, cpu);
        }

This takes 4 loads, 5 if you include the actual load of the per cpu variable:

        ld r11,-32760(r30)      # load address of paca pointer
        ld r9,-32768(r30)       # load link address of percpu variable
        sldi r3,r29,9           # get offset into paca (each entry is 512 bytes)
        ld r0,0(r11)            # load paca pointer
        add r3,r0,r3            # paca + offset
        ld r11,64(r3)           # load paca[cpu].data_offset
        ldx r3,r9,r11           # load per cpu variable

If we remove the ppc64 specific per_cpu_offset(), we get the generic one which indexes into a statically allocated array. This removes one load and one add:

        ld r11,-32760(r30)      # load address of __per_cpu_offset
        ld r9,-32768(r30)       # load link address of percpu variable
        sldi r3,r29,3           # get offset into __per_cpu_offset (each entry 8 bytes)
        ldx r11,r11,r3          # load __per_cpu_offset[cpu]
        ldx r3,r9,r11           # load per cpu variable

Having all the offsets in one array also helps when iterating over a per cpu variable across a number of cpus, such as in the scheduler. Before, we would need to load one paca cacheline when calculating each per cpu offset. Now we have 16 (128 / sizeof(long)) per cpu offsets in each cacheline.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
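For illustration only (this sketch is not part of the patch), the two lookup schemes the assembly above compiles from can be written in C roughly as follows. Only the data_offset field at byte offset 64 and the 512-byte paca stride are taken from the assembly above; the rest of the layout and the helper names are assumed:

        /* Illustrative sketch, not kernel source. */
        struct paca_struct {
                char pad[64];              /* earlier paca fields (layout assumed) */
                unsigned long data_offset; /* per cpu offset, at +64 per the asm above */
                char rest[512 - 64 - sizeof(unsigned long)]; /* pad to 512 bytes */
        };

        extern struct paca_struct *paca;         /* dynamically allocated array */
        extern unsigned long __per_cpu_offset[]; /* generic static array */

        /* Old ppc64 path: load the paca pointer, index by cpu (512-byte
         * stride), then load data_offset -- the extra load and add. */
        static unsigned long offset_via_paca(int cpu)
        {
                return paca[cpu].data_offset;
        }

        /* Generic path: one indexed load from a dense array; a 128-byte
         * cacheline holds 16 offsets, which helps cpu-iteration loops. */
        static unsigned long offset_via_array(int cpu)
        {
                return __per_cpu_offset[cpu];
        }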
Diffstat (limited to 'arch/powerpc/include/asm/percpu.h')
-rw-r--r--  arch/powerpc/include/asm/percpu.h  3
1 file changed, 0 insertions, 3 deletions
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index f879252..2cedefd 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_POWERPC_PERCPU_H_
 #define _ASM_POWERPC_PERCPU_H_
 #ifdef __powerpc64__
-#include <linux/compiler.h>
 
 /*
  * Same as asm-generic/percpu.h, except that we store the per cpu offset
@@ -12,9 +11,7 @@
 
 #include <asm/paca.h>
 
-#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
 #define __my_cpu_offset local_paca->data_offset
-#define per_cpu_offset(x) (__per_cpu_offset(x))
 
 #endif /* CONFIG_SMP */
 #endif /* __powerpc64__ */
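With the ppc64-specific macros removed, per_cpu_offset() falls through to the generic definition, which is what gives the shorter load sequence in the commit message. For reference, the fallback in asm-generic/percpu.h in kernels of this era looks approximately like this (excerpt from memory, so treat the exact guards as approximate):

        #ifdef CONFIG_SMP
        #ifndef __per_cpu_offset
        extern unsigned long __per_cpu_offset[NR_CPUS];
        #define per_cpu_offset(x) (__per_cpu_offset[x])
        #endif
        #endif /* CONFIG_SMP */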