author     Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>    2015-10-22 21:26:58 +0200
committer  Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>    2015-10-22 21:26:58 +0200
commit     786208aedd8f75b0720e36b2ca66b3a411417301 (patch)
tree       e6e902ab383ca94842c8691a9e28e1a6aefc5695 /arch/ia64
parent     b99374450c03bf5081b88995d91d34fb9b2fd040 (diff)
merged 3.0.101 tag
Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/include/asm/futex.h |  5
-rw-r--r--   arch/ia64/include/asm/mca.h   |  1
-rw-r--r--   arch/ia64/kernel/irq.c        |  8
-rw-r--r--   arch/ia64/kernel/mca.c        | 37
-rw-r--r--   arch/ia64/kvm/kvm-ia64.c      |  5
-rw-r--r--   arch/ia64/kvm/vtlb.c          |  2
6 files changed, 41 insertions(+), 17 deletions(-)
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 21ab376..1bd14d5 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -107,16 +107,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	{
-		register unsigned long r8 __asm ("r8");
+		register unsigned long r8 __asm ("r8") = 0;
 		unsigned long prev;
 		__asm__ __volatile__(
 			"	mf;;				\n"
-			"	mov %0=r0			\n"
 			"	mov ar.ccv=%4;;			\n"
 			"[1:]	cmpxchg4.acq %1=[%2],%3,ar.ccv	\n"
 			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
 			"[2:]"
-			: "=r" (r8), "=r" (prev)
+			: "+r" (r8), "=&r" (prev)
 			: "r" (uaddr), "r" (newval),
 			  "rO" ((long) (unsigned) oldval)
 			: "memory");
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 43f96ab..8c70961 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
 extern int cpe_vector;
 extern int ia64_cpe_irq;
 extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ad69606..f2c41828 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 
+#include <asm/mca.h>
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
 
 #endif /* CONFIG_SMP */
 
+int __init arch_early_irq_init(void)
+{
+	ia64_mca_irq_init();
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 unsigned int vectors_in_migration[NR_IRQS];
 
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 84fb405..9b97303 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2071,22 +2071,16 @@ ia64_mca_init(void)
 	printk(KERN_INFO "MCA related initialization done\n");
 }
 
+
 /*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init.  Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs  : None
- * Outputs : Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existent action. Called from arch_early_irq_init().
  */
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
 {
-	if (!mca_init)
-		return 0;
-
 	/*
 	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
 	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2105,6 +2099,23 @@ ia64_mca_late_init(void)
 	/* Setup the CPEI/P handler */
 	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init.  Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs  : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+	if (!mca_init)
+		return 0;
 
 	register_hotcpu_notifier(&mca_cpu_notifier);
 
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 8213efe..a874213 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1168,6 +1168,11 @@ out:
 
 #define PALE_RESET_ENTRY 0x80000000ffffffb0UL
 
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu *v;
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4332f7e..a7869f8 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
 			"srlz.d;;"
 			"ssm psr.i;;"
 			"srlz.d;;"
-			: "=r"(ret) : "r"(iha), "r"(pte):"memory");
+			: "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
 
 	return ret;
 }
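
Two side notes on the hunks above. The futex.h and vtlb.c changes are the same kind of fix: an inline-asm output that is written before the asm has finished consuming its inputs must carry the earlyclobber modifier ("=&r"), and a value that is both read and written is better declared "+r" with its initialization done in C (the new r8 = 0) than with an extra "mov %0=r0" inside the template. The following standalone sketch illustrates the two constraint forms; it uses x86-64 assembly purely so it compiles on common hardware, and the functions are invented for illustration rather than taken from this patch.

/* Sketch only: demonstrates the "+r" and "=&r" constraints, not kernel code. */
#include <stdio.h>

static long sum_two(long a, long b)
{
	long out = 0;			/* initialized in C, like r8 = 0 above */

	/* "+r"(out): the asm both reads and writes 'out', so no extra
	 * "clear the output" instruction is needed inside the template. */
	__asm__ __volatile__(
		"addq	%1, %0\n\t"
		"addq	%2, %0"
		: "+r" (out)
		: "r" (a), "r" (b)
		: "cc");
	return out;
}

static long copy_then_add(long a, long b)
{
	long out;

	/* The template writes %0 before it has finished reading %2, so %0
	 * must not share a register with an input; "=&r" (earlyclobber)
	 * tells the compiler exactly that, mirroring "=&r"(prev) and
	 * "=&r"(ret) in the patch.  With a plain "=r" the compiler may
	 * legally allocate the same register for 'out' and 'b'. */
	__asm__ __volatile__(
		"movq	%1, %0\n\t"
		"addq	%2, %0"
		: "=&r" (out)
		: "r" (a), "r" (b)
		: "cc");
	return out;
}

int main(void)
{
	printf("%ld %ld\n", sum_two(2, 3), copy_then_add(2, 3));	/* 5 5 */
	return 0;
}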
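For the futex.h hunk specifically, the cmpxchg4.acq sequence implements the compare-and-swap contract behind futex_atomic_cmpxchg_inatomic(): store newval to *uaddr only if it still holds oldval, and always report the value that was actually observed, with acquire ordering. A rough C11 model of that contract follows; it is my own sketch, operates on an ordinary kernel-space pointer, and deliberately omits the leading mf fence and the user-space access and fault handling that the real routine provides through r8 and the __ex_table entry.

/* Sketch only: a C11 model of the compare-and-swap contract above,
 * not the kernel implementation (no __user access, no fault handling). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static int cmpxchg_model(uint32_t *uval, _Atomic uint32_t *uaddr,
			 uint32_t oldval, uint32_t newval)
{
	uint32_t observed = oldval;

	/* Like cmpxchg4.acq: swap only if *uaddr == oldval; 'observed' is
	 * updated to whatever value the location held (the asm's 'prev'). */
	atomic_compare_exchange_strong_explicit(uaddr, &observed, newval,
						memory_order_acquire,
						memory_order_acquire);
	*uval = observed;
	return 0;		/* the real routine returns -EFAULT on a fault */
}

int main(void)
{
	_Atomic uint32_t futex_word = 1;
	uint32_t prev;

	cmpxchg_model(&prev, &futex_word, 1, 2);
	printf("prev=%u new=%u\n", prev, (unsigned)futex_word);	/* prev=1 new=2 */
	return 0;
}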