author     codeworkx <daniel.hillenbrand@codeworkx.de>  2012-06-02 13:09:29 +0200
committer  codeworkx <daniel.hillenbrand@codeworkx.de>  2012-06-02 13:09:29 +0200
commit     c6da2cfeb05178a11c6d062a06f8078150ee492f (patch)
tree       f3b4021d252c52d6463a9b3c1bb7245e399b009c /arch/arm/vfp/vfpmodule.c
parent     c6d7c4dbff353eac7919342ae6b3299a378160a6 (diff)
samsung update 1
Diffstat (limited to 'arch/arm/vfp/vfpmodule.c')
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 46
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f25e7ec..871f03c 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -33,7 +33,13 @@ void vfp_support_entry(void);
void vfp_null_entry(void);
void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
+
+/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
* Dual-use variable.
@@ -57,12 +63,12 @@ static void vfp_thread_flush(struct thread_info *thread)
/*
* Disable VFP to ensure we initialize it first. We must ensure
- * that the modification of last_VFP_context[] and hardware disable
+ * that the modification of vfp_current_hw_state[] and hardware disable
* are done for the same CPU and without preemption.
*/
cpu = get_cpu();
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
}
@@ -73,8 +79,8 @@ static void vfp_thread_exit(struct thread_info *thread)
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu = get_cpu();
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
put_cpu();
}
@@ -129,9 +135,9 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
- if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
- vfp_save_state(last_VFP_context[cpu], fpexc);
- last_VFP_context[cpu]->hard.cpu = cpu;
+ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+ vfp_current_hw_state[cpu]->hard.cpu = cpu;
}
/*
* Thread migration, just force the reloading of the
@@ -139,7 +145,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
* contain stale data.
*/
if (thread->vfpstate.hard.cpu != cpu)
- last_VFP_context[cpu] = NULL;
+ vfp_current_hw_state[cpu] = NULL;
#endif
/*
@@ -412,10 +418,14 @@ static int vfp_pm_suspend(void)
/* disable, just in case */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ } else if (vfp_current_hw_state[ti->cpu]) {
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+ fmxr(FPEXC, fpexc);
}
/* clear any information we had about last context state */
- memset(last_VFP_context, 0, sizeof(last_VFP_context));
+ memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
return 0;
}
@@ -451,7 +461,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
* If the thread we're interested in is the current owner of the
* hardware VFP state, then we need to save its state.
*/
- if (last_VFP_context[cpu] == &thread->vfpstate) {
+ if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
u32 fpexc = fmrx(FPEXC);
/*
@@ -473,7 +483,7 @@ void vfp_flush_hwstate(struct thread_info *thread)
* If the thread we're interested in is the current owner of the
* hardware VFP state, then we need to save its state.
*/
- if (last_VFP_context[cpu] == &thread->vfpstate) {
+ if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
u32 fpexc = fmrx(FPEXC);
fmxr(FPEXC, fpexc & ~FPEXC_EN);
@@ -482,7 +492,7 @@ void vfp_flush_hwstate(struct thread_info *thread)
* Set the context to NULL to force a reload the next time
* the thread uses the VFP.
*/
- last_VFP_context[cpu] = NULL;
+ vfp_current_hw_state[cpu] = NULL;
}
#ifdef CONFIG_SMP
@@ -514,7 +524,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
{
if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
unsigned int cpu = (long)hcpu;
- last_VFP_context[cpu] = NULL;
+ vfp_current_hw_state[cpu] = NULL;
} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
vfp_enable(NULL);
return NOTIFY_OK;
@@ -582,7 +592,6 @@ static int __init vfp_init(void)
elf_hwcap |= HWCAP_VFPv3D16;
}
#endif
-#ifdef CONFIG_NEON
/*
* Check for the presence of the Advanced SIMD
* load/store instructions, integer and single
@@ -590,10 +599,13 @@ static int __init vfp_init(void)
* for NEON if the hardware has the MVFR registers.
*/
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_NEON
if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
elf_hwcap |= HWCAP_NEON;
- }
#endif
+ if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+ elf_hwcap |= HWCAP_VFPv4;
+ }
}
return 0;
}
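
The core of this patch is the per-CPU ownership pointer renamed above (last_VFP_context -> vfp_current_hw_state). Below is a minimal, stand-alone C sketch (user-space, not kernel code) of that tracking pattern: a per-CPU pointer records which thread's register state is currently live in the hardware, and it is cleared whenever that cached copy can no longer be trusted (thread exit, flush, migration, suspend). All identifiers in the sketch (current_hw_state, save_if_owned, flush_owner, NCPUS, struct hw_state) are illustrative assumptions, not the kernel's own symbols.

/*
 * Sketch of lazy hardware-context ownership tracking.
 * Not kernel code; names are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define NCPUS 4

struct hw_state { int regs[32]; int cpu; };
struct thread   { struct hw_state state; };

/* Which thread's state currently lives in the (simulated) hardware, per CPU. */
static struct hw_state *current_hw_state[NCPUS];

/* Saving is only needed while the hardware still holds someone's state. */
static void save_if_owned(int cpu)
{
	if (current_hw_state[cpu]) {
		printf("cpu%d: saving state owned by %p\n",
		       cpu, (void *)current_hw_state[cpu]);
		current_hw_state[cpu]->cpu = cpu;
	}
}

/* Invalidate the cached ownership, forcing a reload on the thread's next use. */
static void flush_owner(int cpu, struct thread *t)
{
	if (current_hw_state[cpu] == &t->state)
		current_hw_state[cpu] = NULL;
}

int main(void)
{
	static struct thread a, b;

	current_hw_state[0] = &a.state;   /* thread a last used the unit on cpu0  */
	save_if_owned(0);                 /* context switch: save a's live state  */
	flush_owner(0, &a);               /* a exits: the hardware copy is stale  */
	save_if_owned(0);                 /* nothing left to save                 */

	current_hw_state[1] = &b.state;
	/* Suspend path: forget all ownership so every thread reloads on resume. */
	memset(current_hw_state, 0, sizeof(current_hw_state));
	save_if_owned(1);                 /* again nothing to save                */
	return 0;
}

The vfp_pm_suspend() hunk in the diff follows the same shape as the memset() step here: it first saves whatever state is still owned on the suspending CPU, then clears the whole array so that no stale hardware context is believed after resume.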