Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/process.c   36
-rw-r--r--  arch/x86/kvm/i8254.c         2
-rw-r--r--  arch/x86/kvm/lapic.c         2
-rw-r--r--  arch/x86/kvm/x86_emulate.c   7
-rw-r--r--  arch/x86/mm/pat.c            2
5 files changed, 35 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 67e9b4a..ba370dc 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -99,15 +99,6 @@ static void mwait_idle(void)
 		local_irq_enable();
 }
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-	if (force_mwait)
-		return 1;
-	/* Any C1 states supported? */
-	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
 /*
  * On SMP it's slightly faster (but much more power-consuming!)
  * to poll the ->work.need_resched flag instead of waiting for the
@@ -119,6 +110,33 @@ static void poll_idle(void)
 	cpu_relax();
 }
 
+/*
+ * mwait selection logic:
+ *
+ * It depends on the CPU. For AMD CPUs that support MWAIT this is
+ * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
+ * then depend on a clock divisor and current Pstate of the core. If
+ * all cores of a processor are in halt state (C1) the processor can
+ * enter the C1E (C1 enhanced) state. If mwait is used this will never
+ * happen.
+ *
+ * idle=mwait overrides this decision and forces the usage of mwait.
+ */
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+	if (force_mwait)
+		return 1;
+
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		switch(c->x86) {
+		case 0x10:
+		case 0x11:
+			return 0;
+		}
+	}
+	return 1;
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 	static int selected;
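
The deleted check gated MWAIT on CPUID leaf 5 reporting at least one C1 sub-state in EDX[7:4]; the replacement drops that capability test and instead refuses MWAIT on AMD family 0x10/0x11, where HLT is what lets all cores reach C1 and then C1E. As an illustrative aside (not part of the patch), the same CPUID field can be probed from user space, assuming a GCC/Clang toolchain that provides <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Mirrors the deleted c->cpuid_level >= 5 guard. */
	if (__get_cpuid_max(0, NULL) < 5 ||
	    !__get_cpuid(5, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 5 (MONITOR/MWAIT) not available");
		return 0;
	}
	/* EDX[7:4]: number of C1 sub-states reachable via MWAIT hints;
	 * the old mwait_usable() required this field to be non-zero. */
	printf("MWAIT C1 sub-states: %u\n", (edx >> 4) & 0xf);
	return 0;
}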
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 3324d90..7c077a9 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -216,7 +216,7 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
 
-	if (pit && vcpu->vcpu_id == 0)
+	if (pit && vcpu->vcpu_id == 0 && pit->pit_state.inject_pending)
 		return atomic_read(&pit->pit_state.pit_timer.pending);
 
 	return 0;
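
This fix and the lapic.c fix below feed the same aggregate check. A reconstructed sketch of the caller (roughly arch/x86/kvm/irq.c of this era; not part of this diff) shows why each device must only report a timer that can actually be delivered, as otherwise the vcpu is woken from halt for an interrupt that never arrives:

/* Reconstructed sketch, not part of this diff. */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = pit_has_pending_timer(vcpu);	/* i8254 PIT, channel 0 */
	ret |= apic_has_pending_timer(vcpu);	/* local APIC LVTT timer */

	return ret;
}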
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 36809d7..c297c50 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -957,7 +957,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *lapic = vcpu->arch.apic;
 
-	if (lapic)
+	if (lapic && apic_enabled(lapic) && apic_lvt_enabled(lapic, APIC_LVTT))
 		return atomic_read(&lapic->timer.pending);
 
 	return 0;
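
apic_enabled() and apic_lvt_enabled() are local lapic.c helpers; sketched roughly below (assuming the apic_get_reg() accessor of this era), they test that the APIC is enabled in both software and hardware and that the timer LVT entry is unmasked. A pending count behind a masked LVTT can never be injected, so it should not count as a pending timer:

/* Rough sketch of the helpers consulted above. */
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	/* An LVT entry delivers only while its mask bit is clear. */
	return !(apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_enabled(struct kvm_lapic *apic)
{
	/* Software enable lives in SPIV; hardware enable in the
	 * APIC-base MSR. Both are required. */
	return (apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED) &&
	       (apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
}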
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index f2a696d..8a96320 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -677,8 +677,9 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 	c->use_modrm_ea = 1;
 
 	if (c->modrm_mod == 3) {
-		c->modrm_val = *(unsigned long *)
-			decode_register(c->modrm_rm, c->regs, c->d & ByteOp);
+		c->modrm_ptr = decode_register(c->modrm_rm,
+					       c->regs, c->d & ByteOp);
+		c->modrm_val = *(unsigned long *)c->modrm_ptr;
 		return rc;
 	}
 
@@ -1005,6 +1006,7 @@ done_prefixes:
 		if ((c->d & ModRM) && c->modrm_mod == 3) {
 			c->src.type = OP_REG;
 			c->src.val = c->modrm_val;
+			c->src.ptr = c->modrm_ptr;
 			break;
 		}
 		c->src.type = OP_MEM;
@@ -1049,6 +1051,7 @@ done_prefixes:
 		if ((c->d & ModRM) && c->modrm_mod == 3) {
 			c->dst.type = OP_REG;
 			c->dst.val = c->dst.orig_val = c->modrm_val;
+			c->dst.ptr = c->modrm_ptr;
 			break;
 		}
 		c->dst.type = OP_MEM;
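
Caching modrm_ptr matters because the register pointer cannot be recomputed trivially at write-back time: with ByteOp set, ModRM encodings 4-7 name AH/CH/DH/BH, which alias the high byte of the first four GPRs rather than slots of their own. decode_register() resolves this, roughly as in the standalone sketch below (reconstructed for illustration); saving its result lets the common write-back path store through the exact byte that was read:

#include <stdio.h>

typedef unsigned char u8;

/* Reconstructed sketch of the emulator's decode_register(). */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p = &regs[modrm_reg];

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		/* AH/CH/DH/BH: bits 15:8 of rAX/rCX/rDX/rBX, i.e.
		 * byte offset 1 on little-endian x86. */
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}

int main(void)
{
	unsigned long regs[8] = { 0x1122334455667788UL };	/* regs[0] = rAX */

	/* ByteOp encoding 4 selects AH, the byte at offset 1 of rAX;
	 * prints 0x77 on a little-endian machine. */
	printf("AH = %#x\n", *(unsigned char *)decode_register(4, regs, 1));
	return 0;
}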
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index bcb1a8e..de3a998 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -28,7 +28,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_wc_enabled = 1;
 
-void __init pat_disable(char *reason)
+void __cpuinit pat_disable(char *reason)
 {
 	pat_wc_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
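
The annotation change is needed because pat_disable() is reachable from per-CPU feature setup, not only at boot: __init text is freed once booting finishes, so a CPU hot-added later would call into discarded memory. __cpuinit text is kept whenever CPU hotplug is configured, which the convention below (simplified from include/linux/init.h of this era) makes explicit:

/* Simplified sketch of the annotation convention. */
#ifdef CONFIG_HOTPLUG_CPU
#define __cpuinit			/* kept: hot-added CPUs may run it */
#else
#define __cpuinit	__init		/* no hotplug: discard after boot */
#endif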