Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                                  |   4
-rw-r--r--  arch/i386/boot/video.S                             |   2
-rw-r--r--  arch/i386/kernel/acpi/boot.c                       |   9
-rw-r--r--  arch/i386/kernel/cpu/common.c                      |  12
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c        |   7
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c            |  56
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c  |   2
-rw-r--r--  arch/i386/kernel/crash.c                           |  10
-rw-r--r--  arch/i386/kernel/efi.c                             |   3
-rw-r--r--  arch/i386/kernel/i8237.c                           |   5
-rw-r--r--  arch/i386/kernel/i8259.c                           |  45
-rw-r--r--  arch/i386/kernel/io_apic.c                         | 495
-rw-r--r--  arch/i386/kernel/irq.c                             |  19
-rw-r--r--  arch/i386/kernel/kprobes.c                         |  41
-rw-r--r--  arch/i386/kernel/ldt.c                             |   2
-rw-r--r--  arch/i386/kernel/nmi.c                             |   1
-rw-r--r--  arch/i386/kernel/process.c                         |  11
-rw-r--r--  arch/i386/kernel/smp.c                             |  27
-rw-r--r--  arch/i386/kernel/smpboot.c                         |   5
-rw-r--r--  arch/i386/kernel/sys_i386.c                        |  42
-rw-r--r--  arch/i386/kernel/time.c                            |   3
-rw-r--r--  arch/i386/kernel/traps.c                           |   6
-rw-r--r--  arch/i386/lib/delay.c                              |   1
-rw-r--r--  arch/i386/mach-visws/visws_apic.c                  |   2
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c               |   7
-rw-r--r--  arch/i386/mm/discontig.c                           |   3
-rw-r--r--  arch/i386/mm/highmem.c                             |  18
-rw-r--r--  arch/i386/mm/init.c                                |   4
-rw-r--r--  arch/i386/mm/ioremap.c                             |  84
-rw-r--r--  arch/i386/pci/fixup.c                              |   2
-rw-r--r--  arch/i386/pci/irq.c                                |  34
-rw-r--r--  arch/i386/pci/mmconfig.c                           |   5
32 files changed, 516 insertions(+), 451 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 3fd2f25..8ff1c6f 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -682,7 +682,7 @@ config EFI
depends on ACPI
default n
---help---
- This enables the the kernel to boot on EFI platforms using
+ This enables the kernel to boot on EFI platforms using
system configuration information passed to it from the firmware.
This also enables the kernel to use any EFI runtime services that are
available (such as the EFI variable services).
@@ -1142,7 +1142,7 @@ source "arch/i386/oprofile/Kconfig"
config KPROBES
bool "Kprobes (EXPERIMENTAL)"
- depends on EXPERIMENTAL && MODULES
+ depends on KALLSYMS && EXPERIMENTAL && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index 8c2a6fa..2c5b5cc 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -11,8 +11,6 @@
*
*/
-#include <linux/config.h> /* for CONFIG_VIDEO_* */
-
/* Enable autodetection of SVGA adapters and modes. */
#undef CONFIG_VIDEO_SVGA
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 1aaea6a..92f79cd 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -62,8 +62,6 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
#include <mach_mpparse.h>
#endif /* CONFIG_X86_LOCAL_APIC */
-static inline int gsi_irq_sharing(int gsi) { return gsi; }
-
#endif /* X86 */
#define BAD_MADT_ENTRY(entry, end) ( \
@@ -468,12 +466,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
-#ifdef CONFIG_X86_IO_APIC
- if (use_pci_vector() && !platform_legacy_irq(gsi))
- *irq = IO_APIC_VECTOR(gsi);
- else
-#endif
- *irq = gsi_irq_sharing(gsi);
+ *irq = gsi;
return 0;
}
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 2799baa..d9f3e3c 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -184,7 +184,16 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
static int __init x86_fxsr_setup(char * s)
{
+ /* Tell all the other CPU's to not use it... */
disable_x86_fxsr = 1;
+
+ /*
+ * ... and clear the bits early in the boot_cpu_data
+ * so that the bootup process doesn't try to do this
+ * either.
+ */
+ clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
+ clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
return 1;
}
__setup("nofxsr", x86_fxsr_setup);
@@ -660,8 +669,7 @@ old_gdt:
*/
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- if (current->mm)
- BUG();
+ BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
load_esp0(t, thread);
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index ea19d09..57c880b 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -396,13 +396,13 @@ static int acpi_cpufreq_early_init_acpi(void)
*/
static int bios_with_sw_any_bug;
-static int __init sw_any_bug_found(struct dmi_system_id *d)
+static int sw_any_bug_found(struct dmi_system_id *d)
{
bios_with_sw_any_bug = 1;
return 0;
}
-static struct dmi_system_id __initdata sw_any_bug_dmi_table[] = {
+static struct dmi_system_id sw_any_bug_dmi_table[] = {
{
.callback = sw_any_bug_found,
.ident = "Supermicro Server X6DLP",
@@ -597,7 +597,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.name = "acpi-cpufreq",
.owner = THIS_MODULE,
.attr = acpi_cpufreq_attr,
- .flags = CPUFREQ_STICKY,
};
@@ -608,7 +607,7 @@ acpi_cpufreq_init (void)
acpi_cpufreq_early_init_acpi();
- return cpufreq_register_driver(&acpi_cpufreq_driver);
+ return cpufreq_register_driver(&acpi_cpufreq_driver);
}
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index f5cc9f5..7233abe 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -178,11 +178,17 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
safe_halt();
/* Change frequency on next halt or sleep */
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
- ACPI_FLUSH_CPU_CACHE();
- /* Invoke C3 */
- inb(cx_address);
- /* Dummy op - must do something useless after P_LVL3 read */
- t = inl(acpi_fadt.xpm_tmr_blk.address);
+ if (port22_en) {
+ ACPI_FLUSH_CPU_CACHE();
+ /* Invoke C1 */
+ halt();
+ } else {
+ ACPI_FLUSH_CPU_CACHE();
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3 read */
+ t = inl(acpi_fadt.xpm_tmr_blk.address);
+ }
/* Disable bus ratio bit */
local_irq_disable();
@@ -567,16 +573,23 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
static int enable_arbiter_disable(void)
{
struct pci_dev *dev;
+ int reg;
u8 pci_cmd;
/* Find PLE133 host bridge */
+ reg = 0x78;
dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, NULL);
+ /* Find CLE266 host bridge */
+ if (dev == NULL) {
+ reg = 0x76;
+ dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
+ }
if (dev != NULL) {
/* Enable access to port 0x22 */
- pci_read_config_byte(dev, 0x78, &pci_cmd);
+ pci_read_config_byte(dev, reg, &pci_cmd);
if ( !(pci_cmd & 1<<7) ) {
pci_cmd |= 1<<7;
- pci_write_config_byte(dev, 0x78, pci_cmd);
+ pci_write_config_byte(dev, reg, pci_cmd);
}
return 1;
}
@@ -680,20 +693,25 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
if (longhaul_version == TYPE_POWERSAVER) {
/* Check ACPI support for C3 state */
cx = &pr->power.states[ACPI_STATE_C3];
- if (cx->address == 0 ||
- (cx->latency > 1000 && ignore_latency == 0) )
+ if (cx->address > 0 &&
+ (cx->latency <= 1000 || ignore_latency != 0) ) {
+ goto print_support_type;
+ }
+ }
+ /* Check ACPI support for bus master arbiter disable */
+ if (!pr->flags.bm_control) {
+ if (enable_arbiter_disable()) {
+ port22_en = 1;
+ } else {
goto err_acpi;
-
- } else {
- /* Check ACPI support for bus master arbiter disable */
- if (!pr->flags.bm_control) {
- if (!enable_arbiter_disable()) {
- printk(KERN_ERR PFX "No ACPI support. No VT8601 host bridge. Aborting.\n");
- return -ENODEV;
- } else
- port22_en = 1;
}
}
+print_support_type:
+ if (!port22_en) {
+ printk (KERN_INFO PFX "Using ACPI support.\n");
+ } else {
+ printk (KERN_INFO PFX "Using northbridge support.\n");
+ }
ret = longhaul_get_ranges();
if (ret != 0)
@@ -716,7 +734,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
return 0;
err_acpi:
- printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n");
+ printk(KERN_ERR PFX "No ACPI support. No VT8601 or VT8623 northbridge. Aborting.\n");
return -ENODEV;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 7a93253..e8993ba 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -386,7 +386,7 @@ static int centrino_cpu_early_init_acpi(void)
* than OS intended it to run at. Detect it and handle it cleanly.
*/
static int bios_with_sw_any_bug;
-static int __init sw_any_bug_found(struct dmi_system_id *d)
+static int sw_any_bug_found(struct dmi_system_id *d)
{
bios_with_sw_any_bug = 1;
return 0;
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 67d297d..144b432 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -23,6 +23,7 @@
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/kdebug.h>
+#include <asm/smp.h>
#include <mach_ipi.h>
@@ -88,7 +89,7 @@ static void crash_save_self(struct pt_regs *regs)
{
int cpu;
- cpu = smp_processor_id();
+ cpu = safe_smp_processor_id();
crash_save_this_cpu(regs, cpu);
}
@@ -133,7 +134,10 @@ static int crash_nmi_callback(struct notifier_block *self,
static void smp_send_nmi_allbutself(void)
{
- send_IPI_allbutself(NMI_VECTOR);
+ cpumask_t mask = cpu_online_map;
+ cpu_clear(safe_smp_processor_id(), mask);
+ if (!cpus_empty(mask))
+ send_IPI_mask(mask, NMI_VECTOR);
}
static struct notifier_block crash_nmi_nb = {
@@ -185,7 +189,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
local_irq_disable();
/* Make a note of crashing cpu. Will be used in NMI callback.*/
- crashing_cpu = smp_processor_id();
+ crashing_cpu = safe_smp_processor_id();
nmi_shootdown_cpus();
lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index f943698..8b40648 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -498,8 +498,7 @@ void __init efi_enter_virtual_mode(void)
check_range_for_systab(md);
}
- if (!efi.systab)
- BUG();
+ BUG_ON(!efi.systab);
status = phys_efi_set_virtual_address_map(
memmap.desc_size * memmap.nr_map,
diff --git a/arch/i386/kernel/i8237.c b/arch/i386/kernel/i8237.c
index c36d1c0..6f508e8 100644
--- a/arch/i386/kernel/i8237.c
+++ b/arch/i386/kernel/i8237.c
@@ -2,6 +2,11 @@
* i8237.c: 8237A DMA controller suspend functions.
*
* Written by Pierre Ossman, 2005.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
*/
#include <linux/init.h>
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index ea5f4e7..d07ed31 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -34,35 +34,15 @@
* moves to arch independent land
*/
-DEFINE_SPINLOCK(i8259A_lock);
-
-static void end_8259A_irq (unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
- irq_desc[irq].action)
- enable_8259A_irq(irq);
-}
-
-#define shutdown_8259A_irq disable_8259A_irq
-
static int i8259A_auto_eoi;
-
+DEFINE_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);
-unsigned int startup_8259A_irq(unsigned int irq)
-{
- enable_8259A_irq(irq);
- return 0; /* never anything pending */
-}
-
-static struct hw_interrupt_type i8259A_irq_type = {
- .typename = "XT-PIC",
- .startup = startup_8259A_irq,
- .shutdown = shutdown_8259A_irq,
- .enable = enable_8259A_irq,
- .disable = disable_8259A_irq,
- .ack = mask_and_ack_8259A,
- .end = end_8259A_irq,
+static struct irq_chip i8259A_chip = {
+ .name = "XT-PIC",
+ .mask = disable_8259A_irq,
+ .unmask = enable_8259A_irq,
+ .mask_ack = mask_and_ack_8259A,
};
/*
@@ -133,7 +113,7 @@ void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
- irq_desc[irq].chip = &i8259A_irq_type;
+ set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
enable_irq(irq);
}
@@ -327,12 +307,12 @@ void init_8259A(int auto_eoi)
outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
if (auto_eoi)
/*
- * in AEOI mode we just have to mask the interrupt
+ * In AEOI mode we just have to mask the interrupt
* when acking.
*/
- i8259A_irq_type.ack = disable_8259A_irq;
+ i8259A_chip.mask_ack = disable_8259A_irq;
else
- i8259A_irq_type.ack = mask_and_ack_8259A;
+ i8259A_chip.mask_ack = mask_and_ack_8259A;
udelay(100); /* wait for 8259A to initialize */
@@ -389,12 +369,13 @@ void __init init_ISA_irqs (void)
/*
* 16 old-style INTA-cycle interrupts:
*/
- irq_desc[i].chip = &i8259A_irq_type;
+ set_irq_chip_and_handler(i, &i8259A_chip,
+ handle_level_irq);
} else {
/*
* 'high' PCI IRQs filled in on demand
*/
- irq_desc[i].chip = &no_irq_type;
+ irq_desc[i].chip = &no_irq_chip;
}
}
}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fd0df75..b7287fb 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -31,6 +31,9 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/htirq.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -38,6 +41,8 @@
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
+#include <asm/msidef.h>
+#include <asm/hypertransport.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
@@ -86,15 +91,6 @@ static struct irq_pin_list {
int apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];
-int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
-#ifdef CONFIG_PCI_MSI
-#define vector_to_irq(vector) \
- (platform_legacy_irq(vector) ? vector : vector_irq[vector])
-#else
-#define vector_to_irq(vector) (vector)
-#endif
-
-
union entry_union {
struct { u32 w1, w2; };
struct IO_APIC_route_entry entry;
@@ -280,7 +276,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
break;
entry = irq_2_pin + entry->next;
}
- set_irq_info(irq, cpumask);
+ set_native_irq_info(irq, cpumask);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -1181,46 +1177,45 @@ static inline int IO_APIC_irq_trigger(int irq)
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
-int assign_irq_vector(int irq)
+static int __assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
- unsigned long flags;
int vector;
- BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
+ BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
- spin_lock_irqsave(&vector_lock, flags);
-
- if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
- spin_unlock_irqrestore(&vector_lock, flags);
+ if (IO_APIC_VECTOR(irq) > 0)
return IO_APIC_VECTOR(irq);
- }
-next:
+
current_vector += 8;
if (current_vector == SYSCALL_VECTOR)
- goto next;
+ current_vector += 8;
if (current_vector >= FIRST_SYSTEM_VECTOR) {
offset++;
- if (!(offset%8)) {
- spin_unlock_irqrestore(&vector_lock, flags);
+ if (!(offset % 8))
return -ENOSPC;
- }
current_vector = FIRST_DEVICE_VECTOR + offset;
}
vector = current_vector;
- vector_irq[vector] = irq;
- if (irq != AUTO_ASSIGN)
- IO_APIC_VECTOR(irq) = vector;
+ IO_APIC_VECTOR(irq) = vector;
+
+ return vector;
+}
+
+static int assign_irq_vector(int irq)
+{
+ unsigned long flags;
+ int vector;
+ spin_lock_irqsave(&vector_lock, flags);
+ vector = __assign_irq_vector(irq);
spin_unlock_irqrestore(&vector_lock, flags);
return vector;
}
-
-static struct hw_interrupt_type ioapic_level_type;
-static struct hw_interrupt_type ioapic_edge_type;
+static struct irq_chip ioapic_chip;
#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
@@ -1228,16 +1223,14 @@ static struct hw_interrupt_type ioapic_edge_type;
static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
- unsigned idx;
-
- idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
-
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
- irq_desc[idx].chip = &ioapic_level_type;
+ set_irq_chip_and_handler(irq, &ioapic_chip,
+ handle_fasteoi_irq);
else
- irq_desc[idx].chip = &ioapic_edge_type;
- set_intr_gate(vector, interrupt[idx]);
+ set_irq_chip_and_handler(irq, &ioapic_chip,
+ handle_edge_irq);
+ set_intr_gate(vector, interrupt[irq]);
}
static void __init setup_IO_APIC_irqs(void)
@@ -1346,7 +1339,8 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
* The timer IRQ doesn't have to know that behind the
* scene we have a 8259A-master in AEOI mode ...
*/
- irq_desc[0].chip = &ioapic_edge_type;
+ irq_desc[0].chip = &ioapic_chip;
+ set_irq_handler(0, handle_edge_irq);
/*
* Add it to the IO-APIC irq-routing table:
@@ -1481,17 +1475,12 @@ void __init print_IO_APIC(void)
);
}
}
- if (use_pci_vector())
- printk(KERN_INFO "Using vector-based indexing\n");
printk(KERN_DEBUG "IRQ to pin mappings:\n");
for (i = 0; i < NR_IRQS; i++) {
struct irq_pin_list *entry = irq_2_pin + i;
if (entry->pin < 0)
continue;
- if (use_pci_vector() && !platform_legacy_irq(i))
- printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
- else
- printk(KERN_DEBUG "IRQ%d ", i);
+ printk(KERN_DEBUG "IRQ%d ", i);
for (;;) {
printk("-> %d:%d", entry->apic, entry->pin);
if (!entry->next)
@@ -1918,6 +1907,8 @@ static int __init timer_irq_works(void)
*/
/*
+ * Startup quirk:
+ *
* Starting up an edge-triggered IO-APIC interrupt is
* nasty - we need to make sure that we get the edge.
* If it is already asserted for some reason, we need
@@ -1925,8 +1916,10 @@ static int __init timer_irq_works(void)
*
* This is not complete - we should be able to fake
* an edge even if it isn't on the 8259A...
+ *
+ * (We do this for level-triggered IRQs too - it cannot hurt.)
*/
-static unsigned int startup_edge_ioapic_irq(unsigned int irq)
+static unsigned int startup_ioapic_irq(unsigned int irq)
{
int was_pending = 0;
unsigned long flags;
@@ -1943,47 +1936,18 @@ static unsigned int startup_edge_ioapic_irq(unsigned int irq)
return was_pending;
}
-/*
- * Once we have recorded IRQ_PENDING already, we can mask the
- * interrupt for real. This prevents IRQ storms from unhandled
- * devices.
- */
-static void ack_edge_ioapic_irq(unsigned int irq)
+static void ack_ioapic_irq(unsigned int irq)
{
- move_irq(irq);
- if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
- == (IRQ_PENDING | IRQ_DISABLED))
- mask_IO_APIC_irq(irq);
+ move_native_irq(irq);
ack_APIC_irq();
}
-/*
- * Level triggered interrupts can just be masked,
- * and shutting down and starting up the interrupt
- * is the same as enabling and disabling them -- except
- * with a startup need to return a "was pending" value.
- *
- * Level triggered interrupts are special because we
- * do not touch any IO-APIC register while handling
- * them. We ack the APIC in the end-IRQ handler, not
- * in the start-IRQ-handler. Protection against reentrance
- * from the same interrupt is still provided, both by the
- * generic IRQ layer and by the fact that an unacked local
- * APIC does not accept IRQs.
- */
-static unsigned int startup_level_ioapic_irq (unsigned int irq)
-{
- unmask_IO_APIC_irq(irq);
-
- return 0; /* don't check for pending */
-}
-
-static void end_level_ioapic_irq (unsigned int irq)
+static void ack_ioapic_quirk_irq(unsigned int irq)
{
unsigned long v;
int i;
- move_irq(irq);
+ move_native_irq(irq);
/*
* It appears there is an erratum which affects at least version 0x11
* of I/O APIC (that's the 82093AA and cores integrated into various
@@ -2018,105 +1982,26 @@ static void end_level_ioapic_irq (unsigned int irq)
}
}
-#ifdef CONFIG_PCI_MSI
-static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-{
- int irq = vector_to_irq(vector);
-
- return startup_edge_ioapic_irq(irq);
-}
-
-static void ack_edge_ioapic_vector(unsigned int vector)
-{
- int irq = vector_to_irq(vector);
-
- move_native_irq(vector);
- ack_edge_ioapic_irq(irq);
-}
-
-static unsigned int startup_level_ioapic_vector (unsigned int vector)
-{
- int irq = vector_to_irq(vector);
-
- return startup_level_ioapic_irq (irq);
-}
-
-static void end_level_ioapic_vector (unsigned int vector)
-{
- int irq = vector_to_irq(vector);
-
- move_native_irq(vector);
- end_level_ioapic_irq(irq);
-}
-
-static void mask_IO_APIC_vector (unsigned int vector)
-{
- int irq = vector_to_irq(vector);
-
- mask_IO_APIC_irq(irq);
-}
-
-static void unmask_IO_APIC_vector (unsigned int vector)
-{
- int irq = vector_to_irq(vector);
-
- unmask_IO_APIC_irq(irq);
-}
-
-#ifdef CONFIG_SMP
-static void set_ioapic_affinity_vector (unsigned int vector,
- cpumask_t cpu_mask)
-{
- int irq = vector_to_irq(vector);
-
- set_native_irq_info(vector, cpu_mask);
- set_ioapic_affinity_irq(irq, cpu_mask);
-}
-#endif
-#endif
-
-static int ioapic_retrigger(unsigned int irq)
+static int ioapic_retrigger_irq(unsigned int irq)
{
send_IPI_self(IO_APIC_VECTOR(irq));
return 1;
}
-/*
- * Level and edge triggered IO-APIC interrupts need different handling,
- * so we use two separate IRQ descriptors. Edge triggered IRQs can be
- * handled with the level-triggered descriptor, but that one has slightly
- * more overhead. Level-triggered interrupts cannot be handled with the
- * edge-triggered handler, without risking IRQ storms and other ugly
- * races.
- */
-static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
- .typename = "IO-APIC-edge",
- .startup = startup_edge_ioapic,
- .shutdown = shutdown_edge_ioapic,
- .enable = enable_edge_ioapic,
- .disable = disable_edge_ioapic,
- .ack = ack_edge_ioapic,
- .end = end_edge_ioapic,
+static struct irq_chip ioapic_chip __read_mostly = {
+ .name = "IO-APIC",
+ .startup = startup_ioapic_irq,
+ .mask = mask_IO_APIC_irq,
+ .unmask = unmask_IO_APIC_irq,
+ .ack = ack_ioapic_irq,
+ .eoi = ack_ioapic_quirk_irq,
#ifdef CONFIG_SMP
- .set_affinity = set_ioapic_affinity,
+ .set_affinity = set_ioapic_affinity_irq,
#endif
- .retrigger = ioapic_retrigger,
+ .retrigger = ioapic_retrigger_irq,
};
-static struct hw_interrupt_type ioapic_level_type __read_mostly = {
- .typename = "IO-APIC-level",
- .startup = startup_level_ioapic,
- .shutdown = shutdown_level_ioapic,
- .enable = enable_level_ioapic,
- .disable = disable_level_ioapic,
- .ack = mask_and_ack_level_ioapic,
- .end = end_level_ioapic,
-#ifdef CONFIG_SMP
- .set_affinity = set_ioapic_affinity,
-#endif
- .retrigger = ioapic_retrigger,
-};
static inline void init_IO_APIC_traps(void)
{
@@ -2135,11 +2020,6 @@ static inline void init_IO_APIC_traps(void)
*/
for (irq = 0; irq < NR_IRQS ; irq++) {
int tmp = irq;
- if (use_pci_vector()) {
- if (!platform_legacy_irq(tmp))
- if ((tmp = vector_to_irq(tmp)) == -1)
- continue;
- }
if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
/*
* Hmm.. We don't have an entry for this,
@@ -2150,20 +2030,21 @@ static inline void init_IO_APIC_traps(void)
make_8259A_irq(irq);
else
/* Strange. Oh, well.. */
- irq_desc[irq].chip = &no_irq_type;
+ irq_desc[irq].chip = &no_irq_chip;
}
}
}
-static void enable_lapic_irq (unsigned int irq)
-{
- unsigned long v;
+/*
+ * The local APIC irq-chip implementation:
+ */
- v = apic_read(APIC_LVT0);
- apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
+static void ack_apic(unsigned int irq)
+{
+ ack_APIC_irq();
}
-static void disable_lapic_irq (unsigned int irq)
+static void mask_lapic_irq (unsigned int irq)
{
unsigned long v;
@@ -2171,21 +2052,19 @@ static void disable_lapic_irq (unsigned int irq)
apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
}
-static void ack_lapic_irq (unsigned int irq)
+static void unmask_lapic_irq (unsigned int irq)
{
- ack_APIC_irq();
-}
+ unsigned long v;
-static void end_lapic_irq (unsigned int i) { /* nothing */ }
+ v = apic_read(APIC_LVT0);
+ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
+}
-static struct hw_interrupt_type lapic_irq_type __read_mostly = {
- .typename = "local-APIC-edge",
- .startup = NULL, /* startup_irq() not used for IRQ0 */
- .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
- .enable = enable_lapic_irq,
- .disable = disable_lapic_irq,
- .ack = ack_lapic_irq,
- .end = end_lapic_irq
+static struct irq_chip lapic_chip __read_mostly = {
+ .name = "local-APIC-edge",
+ .mask = mask_lapic_irq,
+ .unmask = unmask_lapic_irq,
+ .eoi = ack_apic,
};
static void setup_nmi (void)
@@ -2356,7 +2235,7 @@ static inline void check_timer(void)
printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
disable_8259A_irq(0);
- irq_desc[0].chip = &lapic_irq_type;
+ set_irq_chip_and_handler(0, &lapic_chip, handle_fasteoi_irq);
apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
@@ -2531,6 +2410,238 @@ static int __init ioapic_init_sysfs(void)
device_initcall(ioapic_init_sysfs);
+/*
+ * Dynamic irq allocate and deallocation
+ */
+int create_irq(void)
+{
+ /* Allocate an unused irq */
+ int irq, new, vector;
+ unsigned long flags;
+
+ irq = -ENOSPC;
+ spin_lock_irqsave(&vector_lock, flags);
+ for (new = (NR_IRQS - 1); new >= 0; new--) {
+ if (platform_legacy_irq(new))
+ continue;
+ if (irq_vector[new] != 0)
+ continue;
+ vector = __assign_irq_vector(new);
+ if (likely(vector > 0))
+ irq = new;
+ break;
+ }
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ if (irq >= 0) {
+ set_intr_gate(vector, interrupt[irq]);
+ dynamic_irq_init(irq);
+ }
+ return irq;
+}
+
+void destroy_irq(unsigned int irq)
+{
+ unsigned long flags;
+
+ dynamic_irq_cleanup(irq);
+
+ spin_lock_irqsave(&vector_lock, flags);
+ irq_vector[irq] = 0;
+ spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+/*
+ * MSI message composition
+ */
+#ifdef CONFIG_PCI_MSI
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
+{
+ int vector;
+ unsigned dest;
+
+ vector = assign_irq_vector(irq);
+ if (vector >= 0) {
+ dest = cpu_mask_to_apicid(TARGET_CPUS);
+
+ msg->address_hi = MSI_ADDR_BASE_HI;
+ msg->address_lo =
+ MSI_ADDR_BASE_LO |
+ ((INT_DEST_MODE == 0) ?
+ MSI_ADDR_DEST_MODE_PHYSICAL:
+ MSI_ADDR_DEST_MODE_LOGICAL) |
+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_ADDR_REDIRECTION_CPU:
+ MSI_ADDR_REDIRECTION_LOWPRI) |
+ MSI_ADDR_DEST_ID(dest);
+
+ msg->data =
+ MSI_DATA_TRIGGER_EDGE |
+ MSI_DATA_LEVEL_ASSERT |
+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_DATA_DELIVERY_FIXED:
+ MSI_DATA_DELIVERY_LOWPRI) |
+ MSI_DATA_VECTOR(vector);
+ }
+ return vector;
+}
+
+#ifdef CONFIG_SMP
+static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+{
+ struct msi_msg msg;
+ unsigned int dest;
+ cpumask_t tmp;
+ int vector;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+ tmp = TARGET_CPUS;
+
+ vector = assign_irq_vector(irq);
+ if (vector < 0)
+ return;
+
+ dest = cpu_mask_to_apicid(mask);
+
+ read_msi_msg(irq, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ write_msi_msg(irq, &msg);
+ set_native_irq_info(irq, mask);
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
+ * which implement the MSI or MSI-X Capability Structure.
+ */
+static struct irq_chip msi_chip = {
+ .name = "PCI-MSI",
+ .unmask = unmask_msi_irq,
+ .mask = mask_msi_irq,
+ .ack = ack_ioapic_irq,
+#ifdef CONFIG_SMP
+ .set_affinity = set_msi_irq_affinity,
+#endif
+ .retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
+{
+ struct msi_msg msg;
+ int ret;
+ ret = msi_compose_msg(dev, irq, &msg);
+ if (ret < 0)
+ return ret;
+
+ write_msi_msg(irq, &msg);
+
+ set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
+
+ return 0;
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+ return;
+}
+
+#endif /* CONFIG_PCI_MSI */
+
+/*
+ * Hypertransport interrupt support
+ */
+#ifdef CONFIG_HT_IRQ
+
+#ifdef CONFIG_SMP
+
+static void target_ht_irq(unsigned int irq, unsigned int dest)
+{
+ u32 low, high;
+ low = read_ht_irq_low(irq);
+ high = read_ht_irq_high(irq);
+
+ low &= ~(HT_IRQ_LOW_DEST_ID_MASK);
+ high &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+
+ low |= HT_IRQ_LOW_DEST_ID(dest);
+ high |= HT_IRQ_HIGH_DEST_ID(dest);
+
+ write_ht_irq_low(irq, low);
+ write_ht_irq_high(irq, high);
+}
+
+static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+{
+ unsigned int dest;
+ cpumask_t tmp;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+ tmp = TARGET_CPUS;
+
+ cpus_and(mask, tmp, CPU_MASK_ALL);
+
+ dest = cpu_mask_to_apicid(mask);
+
+ target_ht_irq(irq, dest);
+ set_native_irq_info(irq, mask);
+}
+#endif
+
+static struct hw_interrupt_type ht_irq_chip = {
+ .name = "PCI-HT",
+ .mask = mask_ht_irq,
+ .unmask = unmask_ht_irq,
+ .ack = ack_ioapic_irq,
+#ifdef CONFIG_SMP
+ .set_affinity = set_ht_irq_affinity,
+#endif
+ .retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
+{
+ int vector;
+
+ vector = assign_irq_vector(irq);
+ if (vector >= 0) {
+ u32 low, high;
+ unsigned dest;
+ cpumask_t tmp;
+
+ cpus_clear(tmp);
+ cpu_set(vector >> 8, tmp);
+ dest = cpu_mask_to_apicid(tmp);
+
+ high = HT_IRQ_HIGH_DEST_ID(dest);
+
+ low = HT_IRQ_LOW_BASE |
+ HT_IRQ_LOW_DEST_ID(dest) |
+ HT_IRQ_LOW_VECTOR(vector) |
+ ((INT_DEST_MODE == 0) ?
+ HT_IRQ_LOW_DM_PHYSICAL :
+ HT_IRQ_LOW_DM_LOGICAL) |
+ HT_IRQ_LOW_RQEOI_EDGE |
+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ HT_IRQ_LOW_MT_FIXED :
+ HT_IRQ_LOW_MT_ARBITRATED) |
+ HT_IRQ_LOW_IRQ_MASKED;
+
+ write_ht_irq_low(irq, low);
+ write_ht_irq_high(irq, high);
+
+ set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
+ }
+ return vector;
+}
+#endif /* CONFIG_HT_IRQ */
+
/* --------------------------------------------------------------------------
ACPI-based IOAPIC Configuration
-------------------------------------------------------------------------- */
@@ -2684,7 +2795,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
ioapic_write_entry(ioapic, pin, entry);
spin_lock_irqsave(&ioapic_lock, flags);
- set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
+ set_native_irq_info(irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 5fe547c..3dd2e18 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -55,6 +55,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
/* high bit used in ret_from_ code */
int irq = ~regs->orig_eax;
+ struct irq_desc *desc = irq_desc + irq;
#ifdef CONFIG_4KSTACKS
union irq_ctx *curctx, *irqctx;
u32 *isp;
@@ -94,7 +95,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
* current stack (which is the irq stack already after all)
*/
if (curctx != irqctx) {
- int arg1, arg2, ebx;
+ int arg1, arg2, arg3, ebx;
/* build the stack frame on the IRQ stack */
isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
@@ -110,16 +111,17 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
asm volatile(
- " xchgl %%ebx,%%esp \n"
- " call __do_IRQ \n"
+ " xchgl %%ebx,%%esp \n"
+ " call *%%edi \n"
" movl %%ebx,%%esp \n"
- : "=a" (arg1), "=d" (arg2), "=b" (ebx)
- : "0" (irq), "1" (regs), "2" (isp)
- : "memory", "cc", "ecx"
+ : "=a" (arg1), "=d" (arg2), "=c" (arg3), "=b" (ebx)
+ : "0" (irq), "1" (desc), "2" (regs), "3" (isp),
+ "D" (desc->handle_irq)
+ : "memory", "cc"
);
} else
#endif
- __do_IRQ(irq, regs);
+ desc->handle_irq(irq, desc, regs);
irq_exit();
@@ -253,7 +255,8 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
+ seq_printf(p, " %8s", irq_desc[i].chip->name);
+ seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index afe6505..d98e44b 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -230,20 +230,20 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
unsigned long *sara = (unsigned long *)&regs->esp;
- struct kretprobe_instance *ri;
- if ((ri = get_free_rp_inst(rp)) != NULL) {
- ri->rp = rp;
- ri->task = current;
+ struct kretprobe_instance *ri;
+
+ if ((ri = get_free_rp_inst(rp)) != NULL) {
+ ri->rp = rp;
+ ri->task = current;
ri->ret_addr = (kprobe_opcode_t *) *sara;
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
-
- add_rp_inst(ri);
- } else {
- rp->nmissed++;
- }
+ add_rp_inst(ri);
+ } else {
+ rp->nmissed++;
+ }
}
/*
@@ -359,7 +359,7 @@ no_kprobe:
void __kprobes kretprobe_trampoline_holder(void)
{
asm volatile ( ".global kretprobe_trampoline\n"
- "kretprobe_trampoline: \n"
+ "kretprobe_trampoline: \n"
" pushf\n"
/* skip cs, eip, orig_eax, es, ds */
" subl $20, %esp\n"
@@ -395,14 +395,15 @@ no_kprobe:
*/
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head;
- struct hlist_node *node, *tmp;
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
+ INIT_HLIST_HEAD(&empty_rp);
spin_lock_irqsave(&kretprobe_lock, flags);
- head = kretprobe_inst_table_head(current);
+ head = kretprobe_inst_table_head(current);
/*
* It is possible to have multiple instances associated with a given
@@ -413,14 +414,14 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
+ * function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (ri->task != current)
+ if (ri->task != current)
/* another task is sharing our hash bucket */
- continue;
+ continue;
if (ri->rp && ri->rp->handler){
__get_cpu_var(current_kprobe) = &ri->rp->kp;
@@ -429,7 +430,7 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
}
orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri);
+ recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
@@ -444,6 +445,10 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
spin_unlock_irqrestore(&kretprobe_lock, flags);
+ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
return (void*)orig_ret_address;
}
diff --git a/arch/i386/kernel/ldt.c b/arch/i386/kernel/ldt.c
index 983f957..445211e 100644
--- a/arch/i386/kernel/ldt.c
+++ b/arch/i386/kernel/ldt.c
@@ -1,5 +1,5 @@
/*
- * linux/kernel/ldt.c
+ * linux/arch/i386/kernel/ldt.c
*
* Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 0fc4997..3e8e3ad 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -13,7 +13,6 @@
* Mikael Pettersson : PM converted to driver model. Disable/enable API.
*/
-#include <linux/config.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 8c190ca..dad02a9 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -297,9 +297,9 @@ void show_regs(struct pt_regs * regs)
if (user_mode_vm(regs))
printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
printk(" EFLAGS: %08lx %s (%s %.*s)\n",
- regs->eflags, print_tainted(), system_utsname.release,
- (int)strcspn(system_utsname.version, " "),
- system_utsname.version);
+ regs->eflags, print_tainted(), init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->eax,regs->ebx,regs->ecx,regs->edx);
printk("ESI: %08lx EDI: %08lx EBP: %08lx",
@@ -425,13 +425,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
tsk = current;
if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
- p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+ p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
+ IO_BITMAP_BYTES, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr) {
p->thread.io_bitmap_max = 0;
return -ENOMEM;
}
- memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
- IO_BITMAP_BYTES);
set_tsk_thread_flag(p, TIF_IO_BITMAP);
}
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 465188e..1b080ab 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -700,3 +700,30 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
+
+static int convert_apicid_to_cpu(int apic_id)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (x86_cpu_to_apicid[i] == apic_id)
+ return i;
+ }
+ return -1;
+}
+
+int safe_smp_processor_id(void)
+{
+ int apicid, cpuid;
+
+ if (!boot_cpu_has(X86_FEATURE_APIC))
+ return 0;
+
+ apicid = hard_smp_processor_id();
+ if (apicid == BAD_APICID)
+ return 0;
+
+ cpuid = convert_apicid_to_cpu(apicid);
+
+ return cpuid >= 0 ? cpuid : 0;
+}
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 82b26d5..4bb8b77 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -612,6 +612,7 @@ extern struct {
/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
+EXPORT_SYMBOL(node_2_cpu_mask);
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);
@@ -647,7 +648,7 @@ static void map_cpu_to_logical_apicid(void)
{
int cpu = smp_processor_id();
int apicid = logical_smp_processor_id();
- int node = apicid_to_node(hard_smp_processor_id());
+ int node = apicid_to_node(apicid);
if (!node_online(node))
node = first_online_node;
@@ -1061,7 +1062,7 @@ static void __cpuinit do_warm_boot_cpu(void *p)
static int __cpuinit __smp_prepare_cpu(int cpu)
{
- DECLARE_COMPLETION(done);
+ DECLARE_COMPLETION_ONSTACK(done);
struct warm_boot_cpu_info info;
struct work_struct task;
int apicid, ret;
diff --git a/arch/i386/kernel/sys_i386.c b/arch/i386/kernel/sys_i386.c
index 8fdb1fb..4048397 100644
--- a/arch/i386/kernel/sys_i386.c
+++ b/arch/i386/kernel/sys_i386.c
@@ -21,6 +21,7 @@
#include <linux/utsname.h>
#include <asm/uaccess.h>
+#include <asm/unistd.h>
#include <asm/ipc.h>
/*
@@ -210,7 +211,7 @@ asmlinkage int sys_uname(struct old_utsname __user * name)
if (!name)
return -EFAULT;
down_read(&uts_sem);
- err=copy_to_user(name, &system_utsname, sizeof (*name));
+ err = copy_to_user(name, utsname(), sizeof (*name));
up_read(&uts_sem);
return err?-EFAULT:0;
}
@@ -226,16 +227,21 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
down_read(&uts_sem);
- error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
- error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
- error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
- error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
- error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
- error |= __put_user(0,name->release+__OLD_UTS_LEN);
- error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
- error |= __put_user(0,name->version+__OLD_UTS_LEN);
- error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
- error |= __put_user(0,name->machine+__OLD_UTS_LEN);
+ error = __copy_to_user(&name->sysname, &utsname()->sysname,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
+ error |= __copy_to_user(&name->nodename, &utsname()->nodename,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
+ error |= __copy_to_user(&name->release, &utsname()->release,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->release + __OLD_UTS_LEN);
+ error |= __copy_to_user(&name->version, &utsname()->version,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->version + __OLD_UTS_LEN);
+ error |= __copy_to_user(&name->machine, &utsname()->machine,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->machine + __OLD_UTS_LEN);
up_read(&uts_sem);
@@ -243,3 +249,17 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
return error;
}
+
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+{
+ long __res;
+ asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
+ : "=a" (__res)
+ : "0" (__NR_execve),"ri" (filename),"c" (argv), "d" (envp) : "memory");
+ return __res;
+}
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 86944ac..58a2d55 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -76,8 +76,6 @@ int pit_latch_buggy; /* extern */
unsigned int cpu_khz; /* Detected as we calibrate the TSC */
EXPORT_SYMBOL(cpu_khz);
-extern unsigned long wall_jiffies;
-
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
@@ -329,7 +327,6 @@ static int timer_resume(struct sys_device *dev)
do_settimeofday(&ts);
write_seqlock_irqsave(&xtime_lock, flags);
jiffies_64 += sleep_length;
- wall_jiffies += sleep_length;
write_sequnlock_irqrestore(&xtime_lock, flags);
touch_softlockup_watchdog();
return 0;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 6820b8d..00489b7 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -357,9 +357,9 @@ void show_registers(struct pt_regs *regs)
KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
smp_processor_id(), 0xffff & regs->xcs, regs->eip,
- print_tainted(), regs->eflags, system_utsname.release,
- (int)strcspn(system_utsname.version, " "),
- system_utsname.version);
+ print_tainted(), regs->eflags, init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
regs->eax, regs->ebx, regs->ecx, regs->edx);
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index 3c0714c..f6edb11 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/delay.h>
diff --git a/arch/i386/mach-visws/visws_apic.c b/arch/i386/mach-visws/visws_apic.c
index 8285225..5929f884 100644
--- a/arch/i386/mach-visws/visws_apic.c
+++ b/arch/i386/mach-visws/visws_apic.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/i386/mach_visws/visws_apic.c
+ * linux/arch/i386/mach-visws/visws_apic.c
*
* Copyright (C) 1999 Bent Hagemark, Ingo Molnar
*
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 6c86575..856c73f 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -99,6 +99,7 @@ static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);
int hard_smp_processor_id(void);
+int safe_smp_processor_id(void);
/* Inline functions */
static inline void
@@ -1247,6 +1248,12 @@ hard_smp_processor_id(void)
return 0;
}
+int
+safe_smp_processor_id(void)
+{
+ return hard_smp_processor_id();
+}
+
/* broadcast a halt to all other CPUs */
void
smp_send_stop(void)
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 51e3739..455597d 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -153,8 +153,7 @@ static void __init find_max_pfn_node(int nid)
*/
if (node_start_pfn[nid] > max_pfn)
node_start_pfn[nid] = max_pfn;
- if (node_start_pfn[nid] > node_end_pfn[nid])
- BUG();
+ BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}
/*
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index ba44000..f9f647c 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -38,22 +38,19 @@ void *kmap_atomic(struct page *page, enum km_type type)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
if (!pte_none(*(kmap_pte-idx)))
BUG();
-#endif
set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
return (void*) vaddr;
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+#ifdef CONFIG_DEBUG_HIGHMEM
if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
dec_preempt_count();
preempt_check_resched();
@@ -62,14 +59,14 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
BUG();
-
+#endif
/*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
+ * Force other mappings to Oops if they'll try to access this pte
+ * without first remapping it. Keeping stale mappings around is a bad idea
+ * also, in case the page changes cacheability attributes or becomes
+ * a protected page in a hypervisor.
*/
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- __flush_tlb_one(vaddr);
-#endif
+ kpte_clear_flush(kmap_pte-idx, vaddr);
dec_preempt_count();
preempt_check_resched();
@@ -88,7 +85,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
- __flush_tlb_one(vaddr);
return (void*) vaddr;
}
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 4a5a914..1674161 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -493,6 +493,7 @@ int __init set_kernel_exec(unsigned long vaddr, int enable)
pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
else
pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
+ pte_update_defer(&init_mm, vaddr, pte);
__flush_tlb_all();
out:
return ret;
@@ -568,8 +569,7 @@ void __init mem_init(void)
int bad_ppro;
#ifdef CONFIG_FLATMEM
- if (!mem_map)
- BUG();
+ BUG_ON(!mem_map);
#endif
bad_ppro = ppro_with_ram_bug();
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index 247fde7..fff08ae 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -21,82 +21,6 @@
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
- pte_t *pte;
- unsigned long pfn;
-
- pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel(pmd, addr);
- if (!pte)
- return -ENOMEM;
- do {
- BUG_ON(!pte_none(*pte));
- set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
- _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- return 0;
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
- pmd_t *pmd;
- unsigned long next;
-
- phys_addr -= addr;
- pmd = pmd_alloc(&init_mm, pud, addr);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
- if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
- return -ENOMEM;
- } while (pmd++, addr = next, addr != end);
- return 0;
-}
-
-static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
- pud_t *pud;
- unsigned long next;
-
- phys_addr -= addr;
- pud = pud_alloc(&init_mm, pgd, addr);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
- if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
- return -ENOMEM;
- } while (pud++, addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_page_range(unsigned long addr,
- unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
- pgd_t *pgd;
- unsigned long next;
- int err;
-
- BUG_ON(addr >= end);
- flush_cache_all();
- phys_addr -= addr;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
- if (err)
- break;
- } while (pgd++, addr = next, addr != end);
- flush_tlb_all();
- return err;
-}
-
/*
* Generic mapping function (not visible outside):
*/
@@ -115,6 +39,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
void __iomem * addr;
struct vm_struct * area;
unsigned long offset, last_addr;
+ pgprot_t prot;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -142,6 +67,9 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
return NULL;
}
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
+ | _PAGE_ACCESSED | flags);
+
/*
* Mappings have to be page-aligned
*/
@@ -158,7 +86,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
area->phys_addr = phys_addr;
addr = (void __iomem *) area->addr;
if (ioremap_page_range((unsigned long) addr,
- (unsigned long) addr + size, phys_addr, flags)) {
+ (unsigned long) addr + size, phys_addr, prot)) {
vunmap((void __force *) addr);
return NULL;
}
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index 83c3645..b60d7e8 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -393,7 +393,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
* We pretend to bring them out of full D3 state, and restore the proper
* IRQ, PCI cache line size, and BARs, otherwise the device won't function
* properly. In some cases, the device will generate an interrupt on
- * the wrong IRQ line, causing any devices sharing the the line it's
+ * the wrong IRQ line, causing any devices sharing the line it's
* *supposed* to use to be disabled by the kernel's IRQ debug code.
*/
static u16 toshiba_line_size;
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 4a8995c..47f02af 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -981,10 +981,6 @@ static void __init pcibios_fixup_irqs(void)
pci_name(bridge), 'A' + pin, irq);
}
if (irq >= 0) {
- if (use_pci_vector() &&
- !platform_legacy_irq(irq))
- irq = IO_APIC_VECTOR(irq);
-
printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
pci_name(dev), 'A' + pin, irq);
dev->irq = irq;
@@ -1169,33 +1165,3 @@ static int pirq_enable_irq(struct pci_dev *dev)
}
return 0;
}
-
-int pci_vector_resources(int last, int nr_released)
-{
- int count = nr_released;
-
- int next = last;
- int offset = (last % 8);
-
- while (next < FIRST_SYSTEM_VECTOR) {
- next += 8;
-#ifdef CONFIG_X86_64
- if (next == IA32_SYSCALL_VECTOR)
- continue;
-#else
- if (next == SYSCALL_VECTOR)
- continue;
-#endif
- count++;
- if (next >= FIRST_SYSTEM_VECTOR) {
- if (offset%8) {
- next = FIRST_DEVICE_VECTOR + offset;
- offset++;
- continue;
- }
- count--;
- }
- }
-
- return count;
-}
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index 05be8db..d0c3da3 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -67,7 +67,10 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
return 0;
}
-static inline void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
+/*
+ * This is always called under pci_config_lock
+ */
+static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
{
u32 dev_base = base | (bus << 20) | (devfn << 12);
if (dev_base != mmcfg_last_accessed_device) {