Diffstat (limited to 'arch')
400 files changed, 2519 insertions, 3140 deletions
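A recurring change in the hunks below is the suspend-framework rename: struct pm_ops becomes struct platform_suspend_ops, pm_set_ops() becomes suspend_set_ops(), and pm_valid_only_mem becomes suspend_valid_only_mem. As a minimal sketch of the registration pattern that results (the example_ platform, its enter routine, and the initcall choice are hypothetical illustrations, not taken from this diff):

#include <linux/init.h>
#include <linux/suspend.h>

/* Hypothetical SoC sleep entry; real platforms program hardware here. */
static int example_pm_enter(suspend_state_t state)
{
	return 0;
}

static struct platform_suspend_ops example_pm_ops = {
	.enter	= example_pm_enter,
	.valid	= suspend_valid_only_mem,	/* accept PM_SUSPEND_MEM only */
};

static int __init example_pm_init(void)
{
	suspend_set_ops(&example_pm_ops);	/* replaces the old pm_set_ops() */
	return 0;
}
late_initcall(example_pm_init);

Platforms that support states other than mem sleep supply their own .valid callback instead of suspend_valid_only_mem, as the pnx4008 and pxa hunks below do.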
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 2a85dc3..4c002ba 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -654,7 +654,7 @@ source "drivers/Kconfig" source "fs/Kconfig" -source "arch/alpha/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" source "arch/alpha/Kconfig.debug" diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c index f2956ac..497877b 100644 --- a/arch/alpha/kernel/err_marvel.c +++ b/arch/alpha/kernel/err_marvel.c @@ -1082,7 +1082,7 @@ marvel_machine_check(u64 vector, u64 la_ptr) } /* - * A system event or error has occured, handle it here. + * A system event or error has occurred, handle it here. * * Any errors in the logout frame have already been cleared by the * PALcode, so just parse it. diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c index 543d96d..6f38678 100644 --- a/arch/alpha/kernel/err_titan.c +++ b/arch/alpha/kernel/err_titan.c @@ -591,7 +591,7 @@ privateer_process_680_frame(struct el_common *mchk_header, int print) (struct el_PRIVATEER_envdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); - /* TODO - catagorize errors, for now, no error */ + /* TODO - categorize errors, for now, no error */ if (!print) return status; diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index ce85715..6413c5f 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -715,7 +715,7 @@ osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, /* * Alpha Architecture Handbook 4.7.7.3: * To be fully IEEE compiant, we must track the current IEEE - * exception state in software, because spurrious bits can be + * exception state in software, because spurious bits can be * set in the trap shadow of a software-complete insn. */ diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c index 8c8aaa2..8d2982a 100644 --- a/arch/alpha/kernel/semaphore.c +++ b/arch/alpha/kernel/semaphore.c @@ -69,7 +69,7 @@ __down_failed(struct semaphore *sem) #ifdef CONFIG_DEBUG_SEMAPHORE printk("%s(%d): down failed(%p)\n", - tsk->comm, tsk->pid, sem); + tsk->comm, task_pid_nr(tsk), sem); #endif tsk->state = TASK_UNINTERRUPTIBLE; @@ -98,7 +98,7 @@ __down_failed(struct semaphore *sem) #ifdef CONFIG_DEBUG_SEMAPHORE printk("%s(%d): down acquired(%p)\n", - tsk->comm, tsk->pid, sem); + tsk->comm, task_pid_nr(tsk), sem); #endif } @@ -111,7 +111,7 @@ __down_failed_interruptible(struct semaphore *sem) #ifdef CONFIG_DEBUG_SEMAPHORE printk("%s(%d): down failed(%p)\n", - tsk->comm, tsk->pid, sem); + tsk->comm, task_pid_nr(tsk), sem); #endif tsk->state = TASK_INTERRUPTIBLE; @@ -139,7 +139,7 @@ __down_failed_interruptible(struct semaphore *sem) #ifdef CONFIG_DEBUG_SEMAPHORE printk("%s(%d): down %s(%p)\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), (ret < 0 ? 
"interrupted" : "acquired"), sem); #endif return ret; @@ -168,7 +168,7 @@ down(struct semaphore *sem) #endif #ifdef CONFIG_DEBUG_SEMAPHORE printk("%s(%d): down(%p) <count=%d> from %p\n", - current->comm, current->pid, sem, + current->comm, task_pid_nr(current), sem, atomic_read(&sem->count), __builtin_return_address(0)); #endif __down(sem); @@ -182,7 +182,7 @@ down_interruptible(struct semaphore *sem) #endif #ifdef CONFIG_DEBUG_SEMAPHORE printk("%s(%d): down(%p) <count=%d> from %p\n", - current->comm, current->pid, sem, + current->comm, task_pid_nr(current), sem, atomic_read(&sem->count), __builtin_return_address(0)); #endif return __down_interruptible(sem); @@ -201,7 +201,7 @@ down_trylock(struct semaphore *sem) #ifdef CONFIG_DEBUG_SEMAPHORE printk("%s(%d): down_trylock %s from %p\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), ret ? "failed" : "acquired", __builtin_return_address(0)); #endif @@ -217,7 +217,7 @@ up(struct semaphore *sem) #endif #ifdef CONFIG_DEBUG_SEMAPHORE printk("%s(%d): up(%p) <count=%d> from %p\n", - current->comm, current->pid, sem, + current->comm, task_pid_nr(current), sem, atomic_read(&sem->count), __builtin_return_address(0)); #endif __up(sem); diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index ad17644..f4ab233 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -439,7 +439,6 @@ setup_smp(void) ((char *)cpubase + i*hwrpb->processor_size); if ((cpu->flags & 0x1cc) == 0x1cc) { smp_num_probed++; - /* Assume here that "whami" == index */ cpu_set(i, cpu_present_map); cpu->pal_revision = boot_cpu_palrev; } diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index 49bedfb..d187d01 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c @@ -138,7 +138,7 @@ alcor_init_irq(void) for (i = 16; i < 48; ++i) { /* On Alcor, at least, lines 20..30 are not connected - and can generate spurrious interrupts if we turn them + and can generate spurious interrupts if we turn them on while IRQ probing. */ if (i >= 16+20 && i <= 16+30) continue; diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index 14b5a75..ee7b900 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c @@ -78,7 +78,7 @@ alphabook1_init_arch(void) * example, sound boards seem to like using IRQ 9. * * This is NOT how we should do it. PIRQ0-X should have - * their own IRQ's, the way intel uses the IO-APIC irq's. + * their own IRQs, the way intel uses the IO-APIC IRQs. 
*/ static void __init diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index ec0f05e..2dc7f9f 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -182,7 +182,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) #ifdef CONFIG_SMP printk("CPU %d ", hard_smp_processor_id()); #endif - printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err); + printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err); dik_show_regs(regs, r9_15); add_taint(TAINT_DIE); dik_show_trace((unsigned long *)(regs+1)); @@ -646,7 +646,7 @@ got_exception: lock_kernel(); printk("%s(%d): unhandled unaligned exception\n", - current->comm, current->pid); + current->comm, task_pid_nr(current)); printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n", pc, una_reg(26), regs->ps); @@ -786,7 +786,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, } if (++cnt < 5) { printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), regs->pc - 4, va, opcode, reg); } last_time = jiffies; diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c index 8698e07..199f6ef 100644 --- a/arch/alpha/lib/checksum.c +++ b/arch/alpha/lib/checksum.c @@ -5,7 +5,7 @@ * in an architecture-specific manner due to speed.. * Comments in other versions indicate that the algorithms are from RFC1071 * - * accellerated versions (and 21264 assembly versions ) contributed by + * accelerated versions (and 21264 assembly versions ) contributed by * Rick Gorton <rick.gorton@alpha-processor.com> */ diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index 4ca75c7..40736da 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -2,7 +2,7 @@ * csum_partial_copy - do IP checksumming and copy * * (C) Copyright 1996 Linus Torvalds - * accellerated versions (and 21264 assembly versions ) contributed by + * accelerated versions (and 21264 assembly versions ) contributed by * Rick Gorton <rick.gorton@alpha-processor.com> * * Don't look at this too closely - you'll go mad. The things diff --git a/arch/alpha/lib/fls.c b/arch/alpha/lib/fls.c index 7ad84ea..32afaa3 100644 --- a/arch/alpha/lib/fls.c +++ b/arch/alpha/lib/fls.c @@ -3,7 +3,7 @@ */ #include <linux/module.h> -#include <asm/bitops.h> +#include <linux/bitops.h> /* This is fls(x)-1, except zero is held to zero. This allows most efficent input into extbl, plus it allows easy handling of fls(0)=0. */ diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 25154df..4829f96 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -188,13 +188,13 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. 
*/ out_of_memory: - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; } printk(KERN_ALERT "VM: killing process %s(%d)\n", - current->comm, current->pid); + current->comm, task_pid_nr(current)); if (!user_mode(regs)) goto no_context; do_group_exit(SIGKILL); diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 5e6da47..40c15e7 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -235,7 +235,7 @@ callback_init(void * kernel_end) unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT; crb->map[i].va = vaddr; for (j = 0; j < crb->map[i].count; ++j) { - /* Newer console's (especially on larger + /* Newer consoles (especially on larger systems) may require more pages of PTEs. Grab additional pages as needed. */ if (pmd != pmd_offset(pgd, vaddr)) { diff --git a/arch/alpha/oprofile/Kconfig b/arch/alpha/oprofile/Kconfig deleted file mode 100644 index 5ade198..0000000 --- a/arch/alpha/oprofile/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ - -menu "Profiling support" - depends on EXPERIMENTAL - -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - -endmenu - diff --git a/arch/alpha/oprofile/op_impl.h b/arch/alpha/oprofile/op_impl.h index 6b97893..b2b87ae 100644 --- a/arch/alpha/oprofile/op_impl.h +++ b/arch/alpha/oprofile/op_impl.h @@ -38,7 +38,7 @@ struct op_register_config { unsigned long need_reset; }; -/* Per-architecture configury and hooks. */ +/* Per-architecture configuration and hooks. */ struct op_axp_model { void (*reg_setup) (struct op_register_config *, struct op_counter_config *, diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0a0c88d..a0cdaaf 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -851,7 +851,7 @@ config KEXEC help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot - but it is indepedent of the system firmware. And like a reboot + but it is independent of the system firmware. And like a reboot you can start any kernel with it, not just Linux. 
It is an ongoing process to be certain the hardware in a machine @@ -1068,7 +1068,7 @@ endmenu source "fs/Kconfig" -source "arch/arm/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" source "arch/arm/Kconfig.debug" diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c index 111a7fa5..5bba525 100644 --- a/arch/arm/common/sharpsl_pm.c +++ b/arch/arm/common/sharpsl_pm.c @@ -24,6 +24,7 @@ #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/apm-emulation.h> +#include <linux/suspend.h> #include <asm/hardware.h> #include <asm/mach-types.h> @@ -765,9 +766,9 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info) info->battery_life = sharpsl_pm.battstat.mainbat_percent; } -static struct pm_ops sharpsl_pm_ops = { +static struct platform_suspend_ops sharpsl_pm_ops = { .enter = corgi_pxa_pm_enter, - .valid = pm_valid_only_mem, + .valid = suspend_valid_only_mem, }; static int __init sharpsl_pm_probe(struct platform_device *pdev) @@ -799,7 +800,7 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev) apm_get_power_status = sharpsl_apm_get_power_status; - pm_set_ops(&sharpsl_pm_ops); + suspend_set_ops(&sharpsl_pm_ops); mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250)); @@ -808,7 +809,7 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev) static int sharpsl_pm_remove(struct platform_device *pdev) { - pm_set_ops(NULL); + suspend_set_ops(NULL); device_remove_file(&pdev->dev, &dev_attr_battery_percentage); device_remove_file(&pdev->dev, &dev_attr_battery_voltage); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 93b7f8e..4f1a031 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -265,7 +265,7 @@ void __show_regs(struct pt_regs *regs) void show_regs(struct pt_regs * regs) { printk("\n"); - printk("Pid: %d, comm: %20s\n", current->pid, current->comm); + printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm); __show_regs(regs); __backtrace(); } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 5feee72..4b05dc5 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -382,16 +382,16 @@ static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp) if (ret != 2 || old_insn.thumb != BREAKINST_THUMB) printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at " - "0x%08lx (0x%04x)\n", task->comm, task->pid, - addr, old_insn.thumb); + "0x%08lx (0x%04x)\n", task->comm, + task_pid_nr(task), addr, old_insn.thumb); } else { ret = swap_insn(task, addr & ~3, &old_insn.arm, &bp->insn.arm, 4); if (ret != 4 || old_insn.arm != BREAKINST_ARM) printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at " - "0x%08lx (0x%08x)\n", task->comm, task->pid, - addr, old_insn.arm); + "0x%08lx (0x%08x)\n", task->comm, + task_pid_nr(task), addr, old_insn.arm); } } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 8ad4761..4764bd9 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -223,7 +223,7 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p print_modules(); __show_regs(regs); printk("Process %s (pid: %d, stack limit = 0x%p)\n", - tsk->comm, tsk->pid, thread + 1); + tsk->comm, task_pid_nr(tsk), thread + 1); if (!user_mode(regs) || in_interrupt()) { dump_mem("Stack: ", regs->ARM_sp, @@ -337,7 +337,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_UNDEFINED) { printk(KERN_INFO "%s (%d): undefined 
instruction: pc=%p\n", - current->comm, current->pid, pc); + current->comm, task_pid_nr(current), pc); dump_instr(regs); } #endif @@ -388,7 +388,7 @@ static int bad_syscall(int n, struct pt_regs *regs) #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_SYSCALL) { printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", - current->pid, current->comm, n); + task_pid_nr(current), current->comm, n); dump_instr(regs); } #endif @@ -565,7 +565,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) */ if (user_debug & UDBG_SYSCALL) { printk("[%d] %s: arm syscall %d\n", - current->pid, current->comm, no); + task_pid_nr(current), current->comm, no); dump_instr(regs); if (user_mode(regs)) { __show_regs(regs); @@ -642,7 +642,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs) #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_BADABORT) { printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", - current->pid, current->comm, code, instr); + task_pid_nr(current), current->comm, code, instr); dump_instr(regs); show_pte(current->mm, addr); } diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c index ba4a1bb..aa2d365 100644 --- a/arch/arm/mach-at91/gpio.c +++ b/arch/arm/mach-at91/gpio.c @@ -439,7 +439,7 @@ void __init at91_gpio_irq_setup(void) for (i = 0; i < 32; i++, pin++) { /* * Can use the "simple" and not "edge" handler since it's - * shorter, and the AIC handles interupts sanely. + * shorter, and the AIC handles interrupts sanely. */ set_irq_chip(pin, &gpio_irqchip); set_irq_handler(pin, handle_simple_irq); diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index ddf9184..98cb614 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -10,10 +10,9 @@ * (at your option) any later version. */ -#include <linux/pm.h> +#include <linux/suspend.h> #include <linux/sched.h> #include <linux/proc_fs.h> -#include <linux/pm.h> #include <linux/interrupt.h> #include <linux/sysfs.h> #include <linux/module.h> @@ -199,7 +198,7 @@ error: } -static struct pm_ops at91_pm_ops ={ +static struct platform_suspend_ops at91_pm_ops ={ .valid = at91_pm_valid_state, .set_target = at91_pm_set_target, .enter = at91_pm_enter, @@ -220,7 +219,7 @@ static int __init at91_pm_init(void) /* Disable SDRAM low-power mode. Cannot be used with self-refresh. */ at91_sys_write(AT91_SDRAMC_LPR, 0); - pm_set_ops(&at91_pm_ops); + suspend_set_ops(&at91_pm_ops); return 0; } diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c index 0733078..1da9d59 100644 --- a/arch/arm/mach-omap1/irq.c +++ b/arch/arm/mach-omap1/irq.c @@ -5,7 +5,7 @@ * * Copyright (C) 2004 Nokia Corporation * Written by Tony Lindgren <tony@atomide.com> - * Major cleanups by Juha Yrjölä <juha.yrjola@nokia.com> + * Major cleanups by Juha Yrjölä <juha.yrjola@nokia.com> * * Completely re-written to support various OMAP chips with bank specific * interrupt handlers. diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c index 089b820..3bf01e2 100644 --- a/arch/arm/mach-omap1/pm.c +++ b/arch/arm/mach-omap1/pm.c @@ -35,10 +35,9 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include <linux/pm.h> +#include <linux/suspend.h> #include <linux/sched.h> #include <linux/proc_fs.h> -#include <linux/pm.h> #include <linux/interrupt.h> #include <linux/sysfs.h> #include <linux/module.h> @@ -600,27 +599,15 @@ static void (*saved_idle)(void) = NULL; /* * omap_pm_prepare - Do preliminary suspend work. - * @state: suspend state we're entering. 
* */ -static int omap_pm_prepare(suspend_state_t state) +static int omap_pm_prepare(void) { - int error = 0; - /* We cannot sleep in idle until we have resumed */ saved_idle = pm_idle; pm_idle = NULL; - switch (state) - { - case PM_SUSPEND_STANDBY: - case PM_SUSPEND_MEM: - break; - default: - return -EINVAL; - } - - return error; + return 0; } @@ -648,16 +635,14 @@ static int omap_pm_enter(suspend_state_t state) /** * omap_pm_finish - Finish up suspend sequence. - * @state: State we're coming out of. * * This is called after we wake back up (or if entering the sleep state * failed). */ -static int omap_pm_finish(suspend_state_t state) +static void omap_pm_finish(void) { pm_idle = saved_idle; - return 0; } @@ -674,11 +659,11 @@ static struct irqaction omap_wakeup_irq = { -static struct pm_ops omap_pm_ops ={ +static struct platform_suspend_ops omap_pm_ops ={ .prepare = omap_pm_prepare, .enter = omap_pm_enter, .finish = omap_pm_finish, - .valid = pm_valid_only_mem, + .valid = suspend_valid_only_mem, }; static int __init omap_pm_init(void) @@ -735,7 +720,7 @@ static int __init omap_pm_init(void) else if (cpu_is_omap16xx()) omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3); - pm_set_ops(&omap_pm_ops); + suspend_set_ops(&omap_pm_ops); #if defined(DEBUG) && defined(CONFIG_PROC_FS) omap_pm_init_proc(); diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 6f4a543..baf7d82 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -16,10 +16,9 @@ * published by the Free Software Foundation. */ -#include <linux/pm.h> +#include <linux/suspend.h> #include <linux/sched.h> #include <linux/proc_fs.h> -#include <linux/pm.h> #include <linux/interrupt.h> #include <linux/sysfs.h> #include <linux/module.h> @@ -71,28 +70,12 @@ void omap2_pm_idle(void) local_irq_enable(); } -static int omap2_pm_prepare(suspend_state_t state) +static int omap2_pm_prepare(void) { - int error = 0; - /* We cannot sleep in idle until we have resumed */ saved_idle = pm_idle; pm_idle = NULL; - - switch (state) - { - case PM_SUSPEND_STANDBY: - case PM_SUSPEND_MEM: - break; - - case PM_SUSPEND_DISK: - return -ENOTSUPP; - - default: - return -EINVAL; - } - - return error; + return 0; } #define INT0_WAKE_MASK (OMAP_IRQ_BIT(INT_24XX_GPIO_BANK1) | \ @@ -353,9 +336,6 @@ static int omap2_pm_enter(suspend_state_t state) case PM_SUSPEND_MEM: ret = omap2_pm_suspend(); break; - case PM_SUSPEND_DISK: - ret = -ENOTSUPP; - break; default: ret = -EINVAL; } @@ -363,17 +343,16 @@ static int omap2_pm_enter(suspend_state_t state) return ret; } -static int omap2_pm_finish(suspend_state_t state) +static void omap2_pm_finish(void) { pm_idle = saved_idle; - return 0; } -static struct pm_ops omap_pm_ops = { +static struct platform_suspend_ops omap_pm_ops = { .prepare = omap2_pm_prepare, .enter = omap2_pm_enter, .finish = omap2_pm_finish, - .valid = pm_valid_only_mem, + .valid = suspend_valid_only_mem, }; int __init omap2_pm_init(void) @@ -397,7 +376,7 @@ int __init omap2_pm_init(void) omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend, omap24xx_cpu_suspend_sz); - pm_set_ops(&omap_pm_ops); + suspend_set_ops(&omap_pm_ops); pm_idle = omap2_pm_idle; pmdomain_init(); diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 62e801e..8d322c2 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -5,7 +5,7 @@ * * Copyright (C) 2005 Nokia Corporation * Author: Paul Mundt <paul.mundt@nokia.com> - * Juha Yrjölä <juha.yrjola@nokia.com> + * Juha Yrjölä <juha.yrjola@nokia.com> * 
OMAP Dual-mode timer framework support by Timo Teras * * Some parts based off of TI's 24xx code: diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c index 2a137f3..40116d2 100644 --- a/arch/arm/mach-pnx4008/pm.c +++ b/arch/arm/mach-pnx4008/pm.c @@ -15,7 +15,7 @@ #include <linux/rtc.h> #include <linux/sched.h> #include <linux/proc_fs.h> -#include <linux/pm.h> +#include <linux/suspend.h> #include <linux/delay.h> #include <linux/clk.h> @@ -117,7 +117,7 @@ static int pnx4008_pm_valid(suspend_state_t state) (state == PM_SUSPEND_MEM); } -static struct pm_ops pnx4008_pm_ops = { +static struct platform_suspend_ops pnx4008_pm_ops = { .enter = pnx4008_pm_enter, .valid = pnx4008_pm_valid, }; @@ -146,7 +146,7 @@ static int __init pnx4008_pm_init(void) return -ENOMEM; } - pm_set_ops(&pnx4008_pm_ops); + suspend_set_ops(&pnx4008_pm_ops); return 0; } diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index b59a81a..a941c71 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c @@ -86,7 +86,7 @@ static int pxa_pm_valid(suspend_state_t state) return -EINVAL; } -static struct pm_ops pxa_pm_ops = { +static struct platform_suspend_ops pxa_pm_ops = { .valid = pxa_pm_valid, .enter = pxa_pm_enter, }; @@ -104,7 +104,7 @@ static int __init pxa_pm_init(void) return -ENOMEM; } - pm_set_ops(&pxa_pm_ops); + suspend_set_ops(&pxa_pm_ops); return 0; } diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index 0d6a725..dcd81f8 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -20,7 +20,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> -#include <linux/pm.h> +#include <linux/suspend.h> #include <asm/hardware.h> #include <asm/arch/irqs.h> @@ -215,7 +215,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) static struct pxa_cpu_pm_fns pxa25x_cpu_pm_fns = { .save_size = SLEEP_SAVE_SIZE, - .valid = pm_valid_only_mem, + .valid = suspend_valid_only_mem, .save = pxa25x_cpu_pm_save, .restore = pxa25x_cpu_pm_restore, .enter = pxa25x_cpu_pm_enter, diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 2d7fc39..d0f2b59 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -14,7 +14,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> -#include <linux/pm.h> +#include <linux/suspend.h> #include <linux/platform_device.h> #include <asm/hardware.h> diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c index cab9d62..2bfaa61 100644 --- a/arch/arm/mach-s3c2410/clock.c +++ b/arch/arm/mach-s3c2410/clock.c @@ -238,7 +238,7 @@ int __init s3c2410_baseclk_add(void) } /* We must be careful disabling the clocks we are not intending to - * be using at boot time, as subsytems such as the LCD which do + * be using at boot time, as subsystems such as the LCD which do * their own DMA requests to the bus can cause the system to lockup * if they where in the middle of requesting bus access. * diff --git a/arch/arm/mach-s3c2412/clock.c b/arch/arm/mach-s3c2412/clock.c index 8543dd6..4589936 100644 --- a/arch/arm/mach-s3c2412/clock.c +++ b/arch/arm/mach-s3c2412/clock.c @@ -689,7 +689,7 @@ int __init s3c2412_baseclk_add(void) } /* We must be careful disabling the clocks we are not intending to - * be using at boot time, as subsytems such as the LCD which do + * be using at boot time, as subsystems such as the LCD which do * their own DMA requests to the bus can cause the system to lockup * if they where in the middle of requesting bus access. 
* diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c index 5840294..b42f956 100644 --- a/arch/arm/mach-s3c2443/clock.c +++ b/arch/arm/mach-s3c2443/clock.c @@ -1005,7 +1005,7 @@ void __init s3c2443_init_clocks(int xtal) } /* We must be careful disabling the clocks we are not intending to - * be using at boot time, as subsytems such as the LCD which do + * be using at boot time, as subsystems such as the LCD which do * their own DMA requests to the bus can cause the system to lockup * if they where in the middle of requesting bus access. * diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index 01a37d3..246c573 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -122,14 +122,14 @@ unsigned long sleep_phys_sp(void *sp) return virt_to_phys(sp); } -static struct pm_ops sa11x0_pm_ops = { +static struct platform_suspend_ops sa11x0_pm_ops = { .enter = sa11x0_pm_enter, - .valid = pm_valid_only_mem, + .valid = suspend_valid_only_mem, }; static int __init sa11x0_pm_init(void) { - pm_set_ops(&sa11x0_pm_ops); + suspend_set_ops(&sa11x0_pm_ops); return 0; } diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 074b7cb..e162cca 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -757,7 +757,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (ai_usermode & 1) printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx " "Address=0x%08lx FSR 0x%03x\n", current->comm, - current->pid, instrptr, + task_pid_nr(current), instrptr, thumb_mode(regs) ? 4 : 8, thumb_mode(regs) ? tinstr : instr, addr, fsr); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 59ed1d0..a8a7dab 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -197,7 +197,7 @@ survive: return fault; out_of_memory: - if (!is_init(tsk)) + if (!is_global_init(tsk)) goto out; /* diff --git a/arch/arm/nwfpe/fpopcode.h b/arch/arm/nwfpe/fpopcode.h index ec78e35..786e4c9 100644 --- a/arch/arm/nwfpe/fpopcode.h +++ b/arch/arm/nwfpe/fpopcode.h @@ -78,11 +78,11 @@ TABLE 1 +-------------------------+---+---+---------+---------+ | Precision | u | v | FPSR.EP | length | +-------------------------+---+---+---------+---------+ -| Single | 0 ü 0 | x | 1 words | -| Double | 1 ü 1 | x | 2 words | -| Extended | 1 ü 1 | x | 3 words | -| Packed decimal | 1 ü 1 | 0 | 3 words | -| Expanded packed decimal | 1 ü 1 | 1 | 4 words | +| Single | 0 | 0 | x | 1 words | +| Double | 1 | 1 | x | 2 words | +| Extended | 1 | 1 | x | 3 words | +| Packed decimal | 1 | 1 | 0 | 3 words | +| Expanded packed decimal | 1 | 1 | 1 | 4 words | +-------------------------+---+---+---------+---------+ Note: x = don't care */ @@ -92,10 +92,10 @@ TABLE 2 +---+---+---------------------------------+ | w | x | Number of registers to transfer | +---+---+---------------------------------+ -| 0 ü 1 | 1 | -| 1 ü 0 | 2 | -| 1 ü 1 | 3 | -| 0 ü 0 | 4 | +| 0 | 1 | 1 | +| 1 | 0 | 2 | +| 1 | 1 | 3 | +| 0 | 0 | 4 | +---+---+---------------------------------+ */ @@ -156,10 +156,10 @@ TABLE 5 +-------------------------+---+---+ | Rounding Precision | e | f | +-------------------------+---+---+ -| IEEE Single precision | 0 ü 0 | -| IEEE Double precision | 0 ü 1 | -| IEEE Extended precision | 1 ü 0 | -| undefined (trap) | 1 ü 1 | +| IEEE Single precision | 0 | 0 | +| IEEE Double precision | 0 | 1 | +| IEEE Extended precision | 1 | 0 | +| undefined (trap) | 1 | 1 | +-------------------------+---+---+ */ @@ -168,10 +168,10 @@ TABLE 5 +---------------------------------+---+---+ | 
Rounding Mode | g | h | +---------------------------------+---+---+ -| Round to nearest (default) | 0 ü 0 | -| Round toward plus infinity | 0 ü 1 | -| Round toward negative infinity | 1 ü 0 | -| Round toward zero | 1 ü 1 | +| Round to nearest (default) | 0 | 0 | +| Round toward plus infinity | 0 | 1 | +| Round toward negative infinity | 1 | 0 | +| Round toward zero | 1 | 1 | +---------------------------------+---+---+ */ @@ -369,20 +369,20 @@ TABLE 5 #define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5) #ifdef CONFIG_FPE_NWFPE_XP -static inline __attribute_pure__ floatx80 getExtendedConstant(const unsigned int nIndex) +static inline floatx80 __pure getExtendedConstant(const unsigned int nIndex) { extern const floatx80 floatx80Constant[]; return floatx80Constant[nIndex]; } #endif -static inline __attribute_pure__ float64 getDoubleConstant(const unsigned int nIndex) +static inline float64 __pure getDoubleConstant(const unsigned int nIndex) { extern const float64 float64Constant[]; return float64Constant[nIndex]; } -static inline __attribute_pure__ float32 getSingleConstant(const unsigned int nIndex) +static inline float32 __pure getSingleConstant(const unsigned int nIndex) { extern const float32 float32Constant[]; return float32Constant[nIndex]; diff --git a/arch/arm/oprofile/Kconfig b/arch/arm/oprofile/Kconfig deleted file mode 100644 index afd93ad..0000000 --- a/arch/arm/oprofile/Kconfig +++ /dev/null @@ -1,42 +0,0 @@ - -menu "Profiling support" - depends on EXPERIMENTAL - -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. 
- -if OPROFILE - -config OPROFILE_ARMV6 - bool - depends on CPU_V6 && !SMP - default y - select OPROFILE_ARM11_CORE - -config OPROFILE_MPCORE - bool - depends on CPU_V6 && SMP - default y - select OPROFILE_ARM11_CORE - -config OPROFILE_ARM11_CORE - bool - -endif - -endmenu - diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index 05a3849..dcbba07 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -2,7 +2,7 @@ * linux/arch/arm/plat-omap/dma.c * * Copyright (C) 2003 Nokia Corporation - * Author: Juha Yrjölä <juha.yrjola@nokia.com> + * Author: Juha Yrjölä <juha.yrjola@nokia.com> * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> * Graphics DMA and LCD DMA graphics tranformations * by Imre Deak <imre.deak@nokia.com> diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 337455d..6097753 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -4,7 +4,7 @@ * Support functions for OMAP GPIO * * Copyright (C) 2003-2005 Nokia Corporation - * Written by Juha Yrjölä <juha.yrjola@nokia.com> + * Written by Juha Yrjölä <juha.yrjola@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/plat-s3c24xx/pm.c index eab1850..4fdb311 100644 --- a/arch/arm/plat-s3c24xx/pm.c +++ b/arch/arm/plat-s3c24xx/pm.c @@ -612,9 +612,9 @@ static int s3c2410_pm_enter(suspend_state_t state) return 0; } -static struct pm_ops s3c2410_pm_ops = { +static struct platform_suspend_ops s3c2410_pm_ops = { .enter = s3c2410_pm_enter, - .valid = pm_valid_only_mem, + .valid = suspend_valid_only_mem, }; /* s3c2410_pm_init @@ -628,6 +628,6 @@ int __init s3c2410_pm_init(void) { printk("S3C2410 Power Management, (c) 2004 Simtec Electronics\n"); - pm_set_ops(&s3c2410_pm_ops); + suspend_set_ops(&s3c2410_pm_ops); return 0; } diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index d12346a..bbecbd8 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -189,7 +189,7 @@ config CMDLINE endmenu -menu "Power managment options" +menu "Power management options" menu "CPU Frequency scaling" diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index 9a73ce7..8a7caf8 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c @@ -89,7 +89,7 @@ void _exception(long signr, struct pt_regs *regs, int code, * generate the same exception over and over again and we get * nowhere. Better to kill it and let the kernel panic. */ - if (is_init(current)) { + if (is_global_init(current)) { __sighandler_t handler; spin_lock_irq(¤t->sighand->siglock); diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 11472f8..6560cb1 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -160,7 +160,7 @@ bad_area: if (exception_trace && printk_ratelimit()) printk("%s%s[%d]: segfault at %08lx pc %08lx " "sp %08lx ecr %lu\n", - is_init(tsk) ? KERN_EMERG : KERN_INFO, + is_global_init(tsk) ? KERN_EMERG : KERN_INFO, tsk->comm, tsk->pid, address, regs->pc, regs->sp, ecr); _exception(SIGSEGV, regs, code, address); @@ -209,7 +209,7 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; @@ -231,7 +231,7 @@ do_sigbus: if (exception_trace) printk("%s%s[%d]: bus error at %08lx pc %08lx " "sp %08lx ecr %lu\n", - is_init(tsk) ? KERN_EMERG : KERN_INFO, + is_global_init(tsk) ? 
KERN_EMERG : KERN_INFO, tsk->comm, tsk->pid, address, regs->pc, regs->sp, ecr); diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index aa9db30..ad28dc7 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -613,85 +613,86 @@ config I_ENTRY_L1 bool "Locate interrupt entry code in L1 Memory" default y help - If enabled interrupt entry code (STORE/RESTORE CONTEXT) is linked - into L1 instruction memory.(less latency) + If enabled, interrupt entry code (STORE/RESTORE CONTEXT) is linked + into L1 instruction memory. (less latency) config EXCPT_IRQ_SYSC_L1 - bool "Locate entire ASM lowlevel excepetion / interrupt - Syscall and CPLB handler code in L1 Memory" + bool "Locate entire ASM lowlevel exception / interrupt - Syscall and CPLB handler code in L1 Memory" default y help - If enabled entire ASM lowlevel exception and interrupt entry code (STORE/RESTORE CONTEXT) is linked - into L1 instruction memory.(less latency) + If enabled, the entire ASM lowlevel exception and interrupt entry code + (STORE/RESTORE CONTEXT) is linked into L1 instruction memory. + (less latency) config DO_IRQ_L1 bool "Locate frequently called do_irq dispatcher function in L1 Memory" default y help - If enabled frequently called do_irq dispatcher function is linked - into L1 instruction memory.(less latency) + If enabled, the frequently called do_irq dispatcher function is linked + into L1 instruction memory. (less latency) config CORE_TIMER_IRQ_L1 bool "Locate frequently called timer_interrupt() function in L1 Memory" default y help - If enabled frequently called timer_interrupt() function is linked - into L1 instruction memory.(less latency) + If enabled, the frequently called timer_interrupt() function is linked + into L1 instruction memory. (less latency) config IDLE_L1 bool "Locate frequently idle function in L1 Memory" default y help - If enabled frequently called idle function is linked - into L1 instruction memory.(less latency) + If enabled, the frequently called idle function is linked + into L1 instruction memory. (less latency) config SCHEDULE_L1 bool "Locate kernel schedule function in L1 Memory" default y help - If enabled frequently called kernel schedule is linked - into L1 instruction memory.(less latency) + If enabled, the frequently called kernel schedule is linked + into L1 instruction memory. (less latency) config ARITHMETIC_OPS_L1 bool "Locate kernel owned arithmetic functions in L1 Memory" default y help - If enabled arithmetic functions are linked - into L1 instruction memory.(less latency) + If enabled, arithmetic functions are linked + into L1 instruction memory. (less latency) config ACCESS_OK_L1 bool "Locate access_ok function in L1 Memory" default y help - If enabled access_ok function is linked - into L1 instruction memory.(less latency) + If enabled, the access_ok function is linked + into L1 instruction memory. (less latency) config MEMSET_L1 bool "Locate memset function in L1 Memory" default y help - If enabled memset function is linked - into L1 instruction memory.(less latency) + If enabled, the memset function is linked + into L1 instruction memory. (less latency) config MEMCPY_L1 bool "Locate memcpy function in L1 Memory" default y help - If enabled memcpy function is linked - into L1 instruction memory.(less latency) + If enabled, the memcpy function is linked + into L1 instruction memory. 
(less latency) config SYS_BFIN_SPINLOCK_L1 bool "Locate sys_bfin_spinlock function in L1 Memory" default y help - If enabled sys_bfin_spinlock function is linked - into L1 instruction memory.(less latency) + If enabled, sys_bfin_spinlock function is linked + into L1 instruction memory. (less latency) config IP_CHECKSUM_L1 bool "Locate IP Checksum function in L1 Memory" default n help - If enabled IP Checksum function is linked - into L1 instruction memory.(less latency) + If enabled, the IP Checksum function is linked + into L1 instruction memory. (less latency) config CACHELINE_ALIGNED_L1 bool "Locate cacheline_aligned data to L1 Data Memory" @@ -699,24 +700,24 @@ config CACHELINE_ALIGNED_L1 default n if BF54x depends on !BF531 help - If enabled cacheline_anligned data is linked - into L1 data memory.(less latency) + If enabled, cacheline_anligned data is linked + into L1 data memory. (less latency) config SYSCALL_TAB_L1 bool "Locate Syscall Table L1 Data Memory" default n depends on !BF531 help - If enabled the Syscall LUT is linked - into L1 data memory.(less latency) + If enabled, the Syscall LUT is linked + into L1 data memory. (less latency) config CPLB_SWITCH_TAB_L1 bool "Locate CPLB Switch Tables L1 Data Memory" default n depends on !BF531 help - If enabled the CPLB Switch Tables are linked - into L1 data memory.(less latency) + If enabled, the CPLB Switch Tables are linked + into L1 data memory. (less latency) endmenu @@ -1012,7 +1013,7 @@ source "drivers/Kconfig" source "fs/Kconfig" -source "arch/blackfin/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" menu "Kernel hacking" @@ -1029,13 +1030,13 @@ config DEBUG_HWERR from. config DEBUG_ICACHE_CHECK - bool "Check Instruction cache coherancy" + bool "Check Instruction cache coherency" depends on DEBUG_KERNEL depends on DEBUG_HWERR help - Say Y here if you are getting wierd unexplained errors. This will - ensure that icache is what SDRAM says it should be, by doing a - byte wise comparision between SDRAM and instruction cache. This + Say Y here if you are getting weird unexplained errors. This will + ensure that icache is what SDRAM says it should be by doing a + byte wise comparison between SDRAM and instruction cache. This also relocates the irq_panic() function to L1 memory, (which is un-cached). diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c index b103027..dac51fb 100644 --- a/arch/blackfin/mach-common/pm.c +++ b/arch/blackfin/mach-common/pm.c @@ -32,7 +32,7 @@ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -#include <linux/pm.h> +#include <linux/suspend.h> #include <linux/sched.h> #include <linux/proc_fs.h> #include <linux/io.h> @@ -89,28 +89,15 @@ void bfin_pm_suspend_standby_enter(void) #endif /* CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR */ } - /* - * bfin_pm_prepare - Do preliminary suspend work. - * @state: suspend state we're entering. + * bfin_pm_valid - Tell the PM core that we only support the standby sleep + * state + * @state: suspend state we're checking. * */ -static int bfin_pm_prepare(suspend_state_t state) +static int bfin_pm_valid(suspend_state_t state) { - int error = 0; - - switch (state) { - case PM_SUSPEND_STANDBY: - break; - - case PM_SUSPEND_MEM: - return -ENOTSUPP; - - default: - return -EINVAL; - } - - return error; + return (state == PM_SUSPEND_STANDBY); } /* @@ -135,44 +122,14 @@ static int bfin_pm_enter(suspend_state_t state) return 0; } -/* - * bfin_pm_finish - Finish up suspend sequence. - * @state: State we're coming out of. 
- * - * This is called after we wake back up (or if entering the sleep state - * failed). - */ -static int bfin_pm_finish(suspend_state_t state) -{ - switch (state) { - case PM_SUSPEND_STANDBY: - break; - - case PM_SUSPEND_MEM: - return -ENOTSUPP; - - default: - return -EINVAL; - } - - return 0; -} - -static int bfin_pm_valid(suspend_state_t state) -{ - return (state == PM_SUSPEND_STANDBY); -} - -struct pm_ops bfin_pm_ops = { - .prepare = bfin_pm_prepare, +struct platform_suspend_ops bfin_pm_ops = { .enter = bfin_pm_enter, - .finish = bfin_pm_finish, .valid = bfin_pm_valid, }; static int __init bfin_pm_init(void) { - pm_set_ops(&bfin_pm_ops); + suspend_set_ops(&bfin_pm_ops); return 0; } diff --git a/arch/blackfin/oprofile/Kconfig b/arch/blackfin/oprofile/Kconfig deleted file mode 100644 index 0a2fd99..0000000 --- a/arch/blackfin/oprofile/Kconfig +++ /dev/null @@ -1,29 +0,0 @@ -menu "Profiling support" -depends on EXPERIMENTAL - -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - -config HARDWARE_PM - tristate "Hardware Performance Monitor Profiling" - depends on PROFILING - help - take use of hardware performance monitor to profiling the kernel - and application. - - If unsure, say N. - -endmenu diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 6b4d026..21900a9 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -196,6 +196,8 @@ source "sound/Kconfig" source "drivers/usb/Kconfig" +source "kernel/Kconfig.instrumentation" + source "arch/cris/Kconfig.debug" source "security/Kconfig" diff --git a/arch/cris/arch-v10/Kconfig b/arch/cris/arch-v10/Kconfig index c7ea9ef..f1ce6f6 100644 --- a/arch/cris/arch-v10/Kconfig +++ b/arch/cris/arch-v10/Kconfig @@ -182,7 +182,7 @@ config ETRAX_LED7G set this to same as CONFIG_ETRAX_LED1G (normally 2). config ETRAX_LED8Y - int "Eigth yellow LED bit" + int "Eighth yellow LED bit" depends on ETRAX_CSP0_LEDS default "2" help diff --git a/arch/cris/arch-v10/boot/compressed/misc.c b/arch/cris/arch-v10/boot/compressed/misc.c index ffb8d21..e205d2e 100644 --- a/arch/cris/arch-v10/boot/compressed/misc.c +++ b/arch/cris/arch-v10/boot/compressed/misc.c @@ -8,7 +8,7 @@ * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * puts by Nick Holloway 1993, better puts by Martin Mares 1995 - * adoptation for Linux/CRIS Axis Communications AB, 1999 + * adaptation for Linux/CRIS Axis Communications AB, 1999 * */ diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c index 1de0026..c263b82 100644 --- a/arch/cris/arch-v10/drivers/pcf8563.c +++ b/arch/cris/arch-v10/drivers/pcf8563.c @@ -4,7 +4,7 @@ * From Phillips' datasheet: * * The PCF8563 is a CMOS real-time clock/calendar optimized for low power - * consumption. A programmable clock output, interupt output and voltage + * consumption. A programmable clock output, interrupt output and voltage * low detector are also provided. All address and data are transferred * serially via two-line bidirectional I2C-bus. Maximum bus speed is * 400 kbits/s. 
The built-in word address register is incremented diff --git a/arch/cris/arch-v10/kernel/debugport.c b/arch/cris/arch-v10/kernel/debugport.c index 2b536ca..93679a4 100644 --- a/arch/cris/arch-v10/kernel/debugport.c +++ b/arch/cris/arch-v10/kernel/debugport.c @@ -83,7 +83,7 @@ * * Revision 1.4 2002/11/19 14:35:24 starvik * Changes from linux 2.4 - * Changed struct initializer syntax to the currently prefered notation + * Changed struct initializer syntax to the currently preferred notation * * Revision 1.3 2002/11/06 09:47:03 starvik * Modified for new interrupt macros diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S index ae45d45..c5844cb 100644 --- a/arch/cris/arch-v10/kernel/entry.S +++ b/arch/cris/arch-v10/kernel/entry.S @@ -97,7 +97,7 @@ * * Revision 1.36 2001/11/22 13:36:36 bjornw * * In ret_from_intr, check regs->dccr for usermode reentrance instead of - * DCCR explicitely (because the latter might not reflect current reality) + * DCCR explicitly (because the latter might not reflect current reality) * * In mmu_bus_fault, set $r9 _after_ calling the C-code instead of before * since $r9 is call-clobbered and is potentially needed afterwards * diff --git a/arch/cris/arch-v10/kernel/fasttimer.c b/arch/cris/arch-v10/kernel/fasttimer.c index 8cbdf59..d3ea052 100644 --- a/arch/cris/arch-v10/kernel/fasttimer.c +++ b/arch/cris/arch-v10/kernel/fasttimer.c @@ -84,7 +84,7 @@ * with time based on jiffies and *R_TIMER0_DATA, uses a table * for fast conversion of timer value to microseconds. * (Much faster the standard do_gettimeofday() and we don't really - * wan't to use the true time - we wan't the "uptime" so timers don't screw up + * want to use the true time - we want the "uptime" so timers don't screw up * when we change the time. * TODO: Add efficient support for continuous timers as well. * diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c index 96094cb..845c95f 100644 --- a/arch/cris/arch-v10/kernel/irq.c +++ b/arch/cris/arch-v10/kernel/irq.c @@ -169,7 +169,7 @@ init_IRQ(void) for (i = 0; i < 256; i++) etrax_irv->v[i] = weird_irq; - /* Initialize IRQ handler descriptiors. */ + /* Initialize IRQ handler descriptors. */ for(i = 2; i < NR_IRQS; i++) { irq_desc[i].chip = &crisv10_irq_type; set_int_vector(i, interrupt[i]); diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c index 07628a1..77f4b14 100644 --- a/arch/cris/arch-v10/kernel/kgdb.c +++ b/arch/cris/arch-v10/kernel/kgdb.c @@ -959,7 +959,7 @@ stub_is_stopped(int sigval) /* Send register contents. We probably only need to send the * PC, frame pointer and stack pointer here. Other registers will be - * explicitely asked for. But for now, send all. + * explicitly asked for. But for now, send all. 
*/ for (regno = R0; regno <= USP; regno++) { diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c index b6831ce..1a3760c 100644 --- a/arch/cris/arch-v10/kernel/process.c +++ b/arch/cris/arch-v10/kernel/process.c @@ -64,7 +64,7 @@ void hard_reset_now (void) #if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM) cause_of_death = 0xbedead; #else - /* Since we dont plan to keep on reseting the watchdog, + /* Since we dont plan to keep on resetting the watchdog, the key can be arbitrary hence three */ *R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, 3) | IO_STATE(R_WATCHDOG, enable, start); diff --git a/arch/cris/arch-v10/kernel/shadows.c b/arch/cris/arch-v10/kernel/shadows.c index 38fd44d..326178a 100644 --- a/arch/cris/arch-v10/kernel/shadows.c +++ b/arch/cris/arch-v10/kernel/shadows.c @@ -20,7 +20,7 @@ unsigned long r_timer_ctrl_shadow; * These are only usable if there actually IS a latch connected * to the corresponding external chip-select pin. * - * A common usage is that CSP0 controls LED's and CSP4 video chips. + * A common usage is that CSP0 controls LEDs and CSP4 video chips. */ unsigned long port_cse1_shadow; diff --git a/arch/cris/arch-v10/lib/dram_init.S b/arch/cris/arch-v10/lib/dram_init.S index 9cf8393..6a6bdfd 100644 --- a/arch/cris/arch-v10/lib/dram_init.S +++ b/arch/cris/arch-v10/lib/dram_init.S @@ -40,7 +40,7 @@ * Copy warning from head.S about r8 and r9 * * Revision 1.7 2001/04/18 12:05:39 bjornw - * Fixed comments, and explicitely include config.h to be sure its there + * Fixed comments, and explicitly include config.h to be sure its there * * Revision 1.6 2001/04/10 06:20:16 starvik * Delay should be 200us, not 200ns @@ -66,7 +66,7 @@ */ /* Just to be certain the config file is included, we include it here - * explicitely instead of depending on it being included in the file that + * explicitly instead of depending on it being included in the file that * uses this code. */ diff --git a/arch/cris/arch-v10/lib/string.c b/arch/cris/arch-v10/lib/string.c index 8ffde49..15d6662 100644 --- a/arch/cris/arch-v10/lib/string.c +++ b/arch/cris/arch-v10/lib/string.c @@ -41,7 +41,7 @@ void *memcpy(void *pdst, Make sure the compiler is able to make something useful of this. As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). - If gcc was allright, it really would need no temporaries, and no + If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register void *return_dst __asm__ ("r10") = pdst; diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c index 43778d5..a12c708 100644 --- a/arch/cris/arch-v10/lib/usercopy.c +++ b/arch/cris/arch-v10/lib/usercopy.c @@ -38,7 +38,7 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn) As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. - If gcc was allright, it really would need no temporaries, and no + If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register char *dst __asm__ ("r13") = pdst; @@ -200,7 +200,7 @@ __copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn) As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. - If gcc was allright, it really would need no temporaries, and no + If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. 
*/ register char *dst __asm__ ("r13") = pdst; @@ -380,7 +380,7 @@ __do_clear_user (void __user *pto, unsigned long pn) As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. - If gcc was allright, it really would need no temporaries, and no + If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register char *dst __asm__ ("r13") = pto; diff --git a/arch/cris/arch-v32/boot/compressed/misc.c b/arch/cris/arch-v32/boot/compressed/misc.c index 1190269..0169ba1 100644 --- a/arch/cris/arch-v32/boot/compressed/misc.c +++ b/arch/cris/arch-v32/boot/compressed/misc.c @@ -8,7 +8,7 @@ * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * puts by Nick Holloway 1993, better puts by Martin Mares 1995 - * adoptation for Linux/CRIS Axis Communications AB, 1999 + * adaptation for Linux/CRIS Axis Communications AB, 1999 * */ @@ -151,7 +151,7 @@ serout(const char *s, reg_scope_instances regi_ser) do { rs = REG_RD(ser, regi_ser, rs_stat_din); } - while (!rs.tr_rdy);/* Wait for tranceiver. */ + while (!rs.tr_rdy);/* Wait for transceiver. */ REG_WR(ser, regi_ser, rw_dout, dout); } @@ -264,7 +264,7 @@ serial_setup(reg_scope_instances regi_ser) tr_ctrl.stop_bits = 1; /* 2 stop bits. */ /* - * The baudrate setup is a bit fishy, but in the end the tranceiver is + * The baudrate setup is a bit fishy, but in the end the transceiver is * set to 4800 and the receiver to 115200. The magic value is * 29.493 MHz. */ diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c index 5180d45..3ec12ea 100644 --- a/arch/cris/arch-v32/drivers/axisflashmap.c +++ b/arch/cris/arch-v32/drivers/axisflashmap.c @@ -205,7 +205,7 @@ static struct mtd_info *probe_cs(struct map_info *map_cs) /* * Probe each chip select individually for flash chips. If there are chips on * both cse0 and cse1, the mtd_info structs will be concatenated to one struct - * so that MTD partitions can cross chip boundries. + * so that MTD partitions can cross chip boundaries. * * The only known restriction to how you can mount your chips is that each * chip select must hold similar flash chips. But you need external hardware diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c index e12f6cc..f1edd2e 100644 --- a/arch/cris/arch-v32/drivers/i2c.c +++ b/arch/cris/arch-v32/drivers/i2c.c @@ -275,7 +275,7 @@ i2c_getack(void) ack = 0; i2c_delay(CLOCK_HIGH_TIME/2); if(!ack){ - if(!i2c_getbit()) /* receiver pulld SDA low */ + if(!i2c_getbit()) /* receiver pulled SDA low */ ack = 1; i2c_delay(CLOCK_HIGH_TIME/2); } diff --git a/arch/cris/arch-v32/drivers/nandflash.c b/arch/cris/arch-v32/drivers/nandflash.c index 93ddea4d..5ce015c 100644 --- a/arch/cris/arch-v32/drivers/nandflash.c +++ b/arch/cris/arch-v32/drivers/nandflash.c @@ -138,7 +138,7 @@ struct mtd_info* __init crisv32_nand_flash_probe (void) /* Enable the following for a flash based bad block table */ this->options = NAND_USE_FLASH_BBT; - /* Scan to find existance of the device */ + /* Scan to find existence of the device */ if (nand_scan (crisv32_mtd, 1)) { err = -ENXIO; goto out_ior; diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c index da479a1..6dbd700 100644 --- a/arch/cris/arch-v32/drivers/pcf8563.c +++ b/arch/cris/arch-v32/drivers/pcf8563.c @@ -4,7 +4,7 @@ * From Phillips' datasheet: * * The PCF8563 is a CMOS real-time clock/calendar optimized for low power - * consumption. 
A programmable clock output, interupt output and voltage + * consumption. A programmable clock output, interrupt output and voltage * low detector are also provided. All address and data are transferred * serially via two-line bidirectional I2C-bus. Maximum bus speed is * 400 kbits/s. The built-in word address register is incremented diff --git a/arch/cris/arch-v32/kernel/fasttimer.c b/arch/cris/arch-v32/kernel/fasttimer.c index 79e1e4c..b40551f 100644 --- a/arch/cris/arch-v32/kernel/fasttimer.c +++ b/arch/cris/arch-v32/kernel/fasttimer.c @@ -97,7 +97,7 @@ * with time based on jiffies and *R_TIMER0_DATA, uses a table * for fast conversion of timer value to microseconds. * (Much faster the standard do_gettimeofday() and we don't really - * wan't to use the true time - we wan't the "uptime" so timers don't screw up + * want to use the true time - we want the "uptime" so timers don't screw up * when we change the time. * TODO: Add efficient support for continuous timers as well. * diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c index cc361bf..a9acaa27 100644 --- a/arch/cris/arch-v32/kernel/irq.c +++ b/arch/cris/arch-v32/kernel/irq.c @@ -140,7 +140,7 @@ block_irq(int irq, int cpu) spin_lock_irqsave(&irq_lock, flags); intr_mask = REG_RD_INT(intr_vect, irq_regs[cpu], rw_mask); - /* Remember; 1 let thru, 0 block. */ + /* Remember; 1 let through, 0 block. */ intr_mask &= ~(1 << (irq - FIRST_IRQ)); REG_WR_INT(intr_vect, irq_regs[cpu], rw_mask, intr_mask); @@ -156,7 +156,7 @@ unblock_irq(int irq, int cpu) spin_lock_irqsave(&irq_lock, flags); intr_mask = REG_RD_INT(intr_vect, irq_regs[cpu], rw_mask); - /* Remember; 1 let thru, 0 block. */ + /* Remember; 1 let through, 0 block. */ intr_mask |= (1 << (irq - FIRST_IRQ)); REG_WR_INT(intr_vect, irq_regs[cpu], rw_mask, intr_mask); @@ -308,7 +308,7 @@ crisv32_do_multiple(struct pt_regs* regs) */ irq_enter(); - /* Get which IRQs that happend. */ + /* Get which IRQs that happened. */ masked = REG_RD_INT(intr_vect, irq_regs[cpu], r_masked_vect); /* Calculate new IRQ mask with these IRQs disabled. */ @@ -366,7 +366,7 @@ init_IRQ(void) for (i = 0; i < 256; i++) etrax_irv->v[i] = weird_irq; - /* Point all IRQ's to bad handlers. */ + /* Point all IRQs to bad handlers. */ for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) { irq_desc[j].chip = &crisv32_irq_type; set_exception_vector(i, interrupt[j]); diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c index 6326351..b72a155 100644 --- a/arch/cris/arch-v32/kernel/process.c +++ b/arch/cris/arch-v32/kernel/process.c @@ -162,7 +162,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp, /* Put the switch stack right below the pt_regs. */ swstack = ((struct switch_stack *) childregs) - 1; - /* Paramater to ret_from_sys_call. 0 is don't restart the syscall. */ + /* Parameter to ret_from_sys_call. 0 is don't restart the syscall. */ swstack->r9 = 0; /* diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c index 7cd6ac8..024cc69 100644 --- a/arch/cris/arch-v32/kernel/signal.c +++ b/arch/cris/arch-v32/kernel/signal.c @@ -347,7 +347,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) /* Grab and setup a signal frame. 
* * Basically a lot of state-info is stacked, and arranged for the - * user-mode program to return to the kernel using either a trampiline + * user-mode program to return to the kernel using either a trampoline * which performs the syscall sigreturn(), or a provided user-mode * trampoline. */ @@ -641,7 +641,7 @@ ugdb_trap_user(struct thread_info *ti, int sig) user_regs(ti)->spc = 0; } /* FIXME: Filter out false h/w breakpoint hits (i.e. EDA - not withing any configured h/w breakpoint range). Synchronize with + not within any configured h/w breakpoint range). Synchronize with what already exists for kernel debugging. */ if (((user_regs(ti)->exs & 0xff00) >> 8) == BREAK_8_INTR_VECT) { /* Break 8: subtract 2 from ERP unless in a delay slot. */ diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 697494b..171c96e 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -142,7 +142,7 @@ smp_boot_one_cpu(int cpuid) return -1; } -/* Secondary CPUs starts uing C here. Here we need to setup CPU +/* Secondary CPUs starts using C here. Here we need to setup CPU * specific stuff such as the local timer and the MMU. */ void __init smp_callin(void) { diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c index be0a016..2f7e8e2 100644 --- a/arch/cris/arch-v32/kernel/time.c +++ b/arch/cris/arch-v32/kernel/time.c @@ -99,7 +99,7 @@ unsigned long do_slow_gettimeoffset(void) /* From timer MDS describing the hardware watchdog: * 4.3.1 Watchdog Operation * The watchdog timer is an 8-bit timer with a configurable start value. - * Once started the whatchdog counts downwards with a frequency of 763 Hz + * Once started the watchdog counts downwards with a frequency of 763 Hz * (100/131072 MHz). When the watchdog counts down to 1, it generates an * NMI (Non Maskable Interrupt), and when it counts down to 0, it resets the * chip. diff --git a/arch/cris/arch-v32/kernel/traps.c b/arch/cris/arch-v32/kernel/traps.c index 2462b1e..17fd3db 100644 --- a/arch/cris/arch-v32/kernel/traps.c +++ b/arch/cris/arch-v32/kernel/traps.c @@ -105,7 +105,7 @@ bad_value: /* * This gets called from entry.S when the watchdog has bitten. Show something - * similiar to an Oops dump, and if the kernel if configured to be a nice doggy; + * similar to an Oops dump, and if the kernel is configured to be a nice doggy; * halt instead of reboot. */ void diff --git a/arch/cris/arch-v32/lib/dram_init.S b/arch/cris/arch-v32/lib/dram_init.S index 158b3db..218fbe2 100644 --- a/arch/cris/arch-v32/lib/dram_init.S +++ b/arch/cris/arch-v32/lib/dram_init.S @@ -12,7 +12,7 @@ */ /* Just to be certain the config file is included, we include it here - * explicitely instead of depending on it being included in the file that + * explicitly instead of depending on it being included in the file that * uses this code. */ diff --git a/arch/cris/arch-v32/lib/string.c b/arch/cris/arch-v32/lib/string.c index 98e282a..6740b2c 100644 --- a/arch/cris/arch-v32/lib/string.c +++ b/arch/cris/arch-v32/lib/string.c @@ -41,7 +41,7 @@ void *memcpy(void *pdst, Make sure the compiler is able to make something useful of this. As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). - If gcc was allright, it really would need no temporaries, and no + If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. 
*/ register void *return_dst __asm__ ("r10") = pdst; diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c index f0b0846..04d0cf3 100644 --- a/arch/cris/arch-v32/lib/usercopy.c +++ b/arch/cris/arch-v32/lib/usercopy.c @@ -34,7 +34,7 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn) As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. - If gcc was allright, it really would need no temporaries, and no + If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register char *dst __asm__ ("r13") = pdst; @@ -168,7 +168,7 @@ __copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn) As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. - If gcc was allright, it really would need no temporaries, and no + If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register char *dst __asm__ ("r13") = pdst; @@ -332,7 +332,7 @@ __do_clear_user (void __user *pto, unsigned long pn) As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. - If gcc was allright, it really would need no temporaries, and no + If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register char *dst __asm__ ("r13") = pto; diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c index c2d12e9..a076ef6 100644 --- a/arch/cris/arch-v32/mm/tlb.c +++ b/arch/cris/arch-v32/mm/tlb.c @@ -30,8 +30,8 @@ do { \ * The TLB can host up to 256 different mm contexts at the same time. The running * context is found in the PID register. Each TLB entry contains a page_id that * has to match the PID register to give a hit. page_id_map keeps track of which - * mm's is assigned to which page_id's, making sure it's known when to - * invalidate TLB entries. + * mm is assigned to which page_id, making sure it's known when to invalidate TLB + * entries. * * The last page_id is never running, it is used as an invalid page_id so that * it's possible to make TLB entries that will nerver match. @@ -188,7 +188,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, spin_unlock(&mmu_context_lock); /* - * Remember the pgd for the fault handlers. Keep a seperate copy of it + * Remember the pgd for the fault handlers. Keep a separate copy of it * because current and active_mm might be invalid at points where * there's still a need to derefer the pgd. */ diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 903ea62..5c27ff8 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -7,7 +7,7 @@ * Authors: Bjorn Wesen (bjornw@axis.com) * * This file contains the code used by various IRQ handling routines: - * asking for different IRQ's should be done through these routines + * asking for different IRQs should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. @@ -15,7 +15,7 @@ */ /* - * IRQ's are in fact implemented a bit like signal handlers for the kernel. + * IRQs are in fact implemented a bit like signal handlers for the kernel. * Naturally it's not a 1:1 relation, but there are similarities. 
*/ @@ -83,9 +83,9 @@ skip: /* called by the assembler IRQ entry functions defined in irq.h - * to dispatch the interrupts to registred handlers + * to dispatch the interrupts to registered handlers * interrupts are disabled upon entry - depending on if the - * interrupt was registred with IRQF_DISABLED or not, interrupts + * interrupt was registered with IRQF_DISABLED or not, interrupts * are re-enabled or not. */ diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 8aab814..3034f3f 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -13,7 +13,7 @@ * Fixed warning. * * Revision 1.18 2005/01/12 08:10:14 starvik - * Readded the change of frametype when handling kernel page fault fixup + * Re-added the change of frametype when handling kernel page fault fixup * for v10. This is necessary to avoid that the CPU remakes the faulting * access. * @@ -49,7 +49,7 @@ * * Revision 1.8 2003/07/04 13:02:48 tobiasa * Moved code snippet from arch/cris/mm/fault.c that searches for fixup code - * to seperate function in arch-specific files. + * to separate function in arch-specific files. * * Revision 1.7 2003/01/22 06:48:38 starvik * Fixed warnings issued by GCC 3.2.1 diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c index b7842ff..0c833d1 100644 --- a/arch/cris/mm/init.c +++ b/arch/cris/mm/init.c @@ -8,7 +8,7 @@ * * $Log: init.c,v $ * Revision 1.11 2004/05/28 09:28:56 starvik - * Calculation of loops_per_usec moved because initalization order has changed + * Calculation of loops_per_usec moved because initialization order has changed * in Linux 2.6. * * Revision 1.10 2004/05/14 07:58:05 starvik diff --git a/arch/cris/mm/tlb.c b/arch/cris/mm/tlb.c index c4a98e2..b7f8de5 100644 --- a/arch/cris/mm/tlb.c +++ b/arch/cris/mm/tlb.c @@ -16,7 +16,7 @@ /* The TLB can host up to 64 different mm contexts at the same time. * The running context is R_MMU_CONTEXT, and each TLB entry contains a * page_id that has to match to give a hit. In page_id_map, we keep track - * of which mm's we have assigned which page_id's, so that we know when + * of which mm we have assigned to which page_id, so that we know when * to invalidate TLB entries. 
* * The last page_id is never running - it is used as an invalid page_id diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 74eef71..43153e7 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -375,6 +375,8 @@ source "drivers/Kconfig" source "fs/Kconfig" +source "kernel/Kconfig.instrumentation" + source "arch/frv/Kconfig.debug" source "security/Kconfig" diff --git a/arch/frv/kernel/irq-mb93091.c b/arch/frv/kernel/irq-mb93091.c index ad753c1..9e38f99 100644 --- a/arch/frv/kernel/irq-mb93091.c +++ b/arch/frv/kernel/irq-mb93091.c @@ -17,10 +17,10 @@ #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/bitops.h> #include <asm/io.h> #include <asm/system.h> -#include <asm/bitops.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/irc-regs.h> diff --git a/arch/frv/kernel/irq-mb93093.c b/arch/frv/kernel/irq-mb93093.c index e0983f6..3c2752c 100644 --- a/arch/frv/kernel/irq-mb93093.c +++ b/arch/frv/kernel/irq-mb93093.c @@ -17,10 +17,10 @@ #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/bitops.h> #include <asm/io.h> #include <asm/system.h> -#include <asm/bitops.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/irc-regs.h> diff --git a/arch/frv/kernel/irq-mb93493.c b/arch/frv/kernel/irq-mb93493.c index c157eef..7754c73 100644 --- a/arch/frv/kernel/irq-mb93493.c +++ b/arch/frv/kernel/irq-mb93493.c @@ -17,10 +17,10 @@ #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/bitops.h> #include <asm/io.h> #include <asm/system.h> -#include <asm/bitops.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/irc-regs.h> diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index c7e59dc..73abae7 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -24,12 +24,12 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/module.h> +#include <linux/bitops.h> #include <asm/atomic.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/system.h> -#include <asm/bitops.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/delay.h> @@ -134,7 +134,7 @@ static struct irq_chip frv_cpu_pic = { }; /* - * handles all normal device IRQ's + * handles all normal device IRQs * - registers are referred to by the __frame variable (GR28) * - IRQ distribution is complicated in this arch because of the many PICs, the * way they work and the way they cascade diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c index 8e182ce..7ee3a14 100644 --- a/arch/frv/kernel/semaphore.c +++ b/arch/frv/kernel/semaphore.c @@ -139,7 +139,7 @@ void __up(struct semaphore *sem) waiter = list_entry(sem->wait_list.next, struct sem_waiter, list); /* We must be careful not to touch 'waiter' after we set ->task = NULL. - * It is an allocated on the waiter's stack and may become invalid at + * It is allocated on the waiter's stack and may become invalid at * any time after that point (due to a wakeup from another source). */ list_del_init(&waiter->list); diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c index e83e0bc..925fb01 100644 --- a/arch/frv/kernel/time.c +++ b/arch/frv/kernel/time.c @@ -66,7 +66,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy) /* * Here we are in the timer irq handler. We just have irqs locally * disabled but we don't know if the timer_bh is running on the other - * CPU. We need to avoid to SMP race with it. NOTE: we don' t need + * CPU. We need to avoid to SMP race with it. 
NOTE: we don't need * the irq version of write_lock because as just said we have irq * locally disabled. -arca */ @@ -126,7 +126,7 @@ void time_init(void) /* FIX by dqg : Set to zero for platforms that don't have tod */ /* without this time is undefined and can overflow time_t, causing */ - /* very stange errors */ + /* very strange errors */ year = 1980; mon = day = 1; hour = min = sec = 0; diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index e35f74e..e2e9f57 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -223,6 +223,8 @@ endmenu source "fs/Kconfig" +source "kernel/Kconfig.instrumentation" + source "arch/h8300/Kconfig.debug" source "security/Kconfig" diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug index 996d97e..ee671c3 100644 --- a/arch/h8300/Kconfig.debug +++ b/arch/h8300/Kconfig.debug @@ -42,16 +42,16 @@ config SH_STANDARD_BIOS Require eCos/RedBoot config DEFAULT_CMDLINE - bool "Use buildin commandline" + bool "Use builtin commandline" default n help - buildin kernel commandline enabled. + builtin kernel commandline enabled. config KERNEL_COMMAND string "Buildin commmand string" depends on DEFAULT_CMDLINE help - buildin kernel commandline strings. + builtin kernel commandline strings. config BLKDEV_RESERVE bool "BLKDEV Reserved Memory" diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c index 43d21e9..8dec4dd 100644 --- a/arch/h8300/kernel/irq.c +++ b/arch/h8300/kernel/irq.c @@ -68,7 +68,7 @@ static void h8300_shutdown_irq(unsigned int irq) } /* - * h8300 interrupt controler implementation + * h8300 interrupt controller implementation */ struct irq_chip h8300irq_chip = { .name = "H8300-INTC", diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c index 3306382..e37c835 100644 --- a/arch/h8300/kernel/time.c +++ b/arch/h8300/kernel/time.c @@ -53,7 +53,7 @@ void time_init(void) /* FIX by dqg : Set to zero for platforms that don't have tod */ /* without this time is undefined and can overflow time_t, causing */ - /* very stange errors */ + /* very strange errors */ year = 1980; mon = day = 1; hour = min = sec = 0; diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index f971830..f8f7d7ea 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -5,7 +5,7 @@ * Cloned from Linux/m68k. * * No original Copyright holder listed, - * Probabily original (C) Roman Zippel (assigned DJD, 1999) + * Probable original (C) Roman Zippel (assigned DJD, 1999) * * Copyright 1999-2000 D. 
Jeff Dionne, <jeff@rt-control.com> * diff --git a/arch/h8300/platform/h8s/ints.c b/arch/h8300/platform/h8s/ints.c index a71d6e2..551fd5f 100644 --- a/arch/h8300/platform/h8s/ints.c +++ b/arch/h8300/platform/h8s/ints.c @@ -179,7 +179,7 @@ int request_irq(unsigned int irq, if (use_kmalloc) irq_handle = kmalloc(sizeof(irq_handler_t), GFP_ATOMIC); else { - /* use bootmem allocater */ + /* use bootmem allocator */ irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t)); irq_handle = (irq_handler_t *)((unsigned long)irq_handle | 0x80000000); } diff --git a/arch/h8300/platform/h8s/ints_h8s.c b/arch/h8300/platform/h8s/ints_h8s.c index 93395d2..faa8a45 100644 --- a/arch/h8300/platform/h8s/ints_h8s.c +++ b/arch/h8300/platform/h8s/ints_h8s.c @@ -63,7 +63,7 @@ static const struct irq_pins irq_assign_table1[16]={ {H8300_GPIO_P2,H8300_GPIO_B6},{H8300_GPIO_P2,H8300_GPIO_B7}, }; -/* IRQ to GPIO pinno transrate */ +/* IRQ to GPIO pin translation */ #define IRQ_GPIO_MAP(irqbit,irq,port,bit) \ do { \ if (*(volatile unsigned short *)ITSR & irqbit) { \ diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index b84d505..f6e44fc 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -392,7 +392,7 @@ config X86_MCE_NONFATAL will look at the machine check registers to see if anything happened. Non-fatal problems automatically get corrected (but still logged). Disable this if you don't want to see these messages. - Seeing the messages this option prints out may be indicative of dying hardware, + Seeing the messages this option prints out may be indicative of dying or out-of-spec (ie, overclocked) hardware. This option only does something on certain CPUs. (AMD Athlon/Duron and Intel Pentium 4) @@ -631,7 +631,7 @@ config NUMA default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT) help - NUMA support for i386. This is currently high experimental + NUMA support for i386. This is currently highly experimental and should be only used for kernel development. It might also cause boot failures. @@ -1080,7 +1080,9 @@ config APM_REAL_MODE_POWER_OFF endif # APM -source "arch/x86/kernel/cpu/cpufreq/Kconfig" +source "arch/x86/kernel/cpu/cpufreq/Kconfig_32" + +source "drivers/cpuidle/Kconfig" endmenu @@ -1256,31 +1258,6 @@ source "drivers/Kconfig" source "fs/Kconfig" -menuconfig INSTRUMENTATION - bool "Instrumentation Support" - default y - ---help--- - Say Y here to get to see options related to performance measurement, - debugging, and testing. This option alone does not add any kernel code. - - If you say N, all options in this submenu will be skipped and disabled. - -if INSTRUMENTATION - -source "arch/x86/oprofile/Kconfig" - -config KPROBES - bool "Kprobes" - depends on KALLSYMS && MODULES - help - Kprobes allows you to trap at almost any kernel address and - execute a callback function. register_kprobe() establishes - a probepoint and specifies the callback. Kprobes is useful - for kernel debugging, non-intrusive instrumentation and testing. - If in doubt, say "N". 
- -endif # INSTRUMENTATION - source "arch/i386/Kconfig.debug" source "security/Kconfig" diff --git a/arch/i386/Makefile b/arch/i386/Makefile index f036d2d..b88e47c 100644 --- a/arch/i386/Makefile +++ b/arch/i386/Makefile @@ -102,7 +102,7 @@ core-$(CONFIG_XEN) += arch/x86/xen/ # default subarch .h files mflags-y += -Iinclude/asm-x86/mach-default -head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task_32.o +head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task.o libs-y += arch/x86/lib/ core-y += arch/x86/kernel/ \ @@ -131,9 +131,9 @@ all: bzImage zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage zImage bzImage: vmlinux - $(Q)mkdir -p $(objtree)/arch/i386/boot - $(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) + $(Q)mkdir -p $(objtree)/arch/i386/boot + $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage compressed: zImage diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index c60532d..bef4772 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -452,9 +452,9 @@ config IA64_PALINFO config IA64_MC_ERR_INJECT tristate "MC error injection support" help - Selets whether support for MC error injection. By enabling the - support, kernel provide sysfs interface for user application to - call MC error injection PAL procedure to inject various errors. + Adds support for MC error injection. If enabled, the kernel + will provide a sysfs interface for user applications to + call MC error injection PAL procedures to inject various errors. This is a useful tool for MCA testing. If you're unsure, do not select this option. @@ -491,7 +491,7 @@ config KEXEC but it is independent of the system firmware. And like a reboot you can start any kernel with it, not just Linux. - The name comes from the similiarity to the exec system call. + The name comes from the similarity to the exec system call. It is an ongoing process to be certain the hardware in a machine is properly shutdown, so do not be surprised if this code does not @@ -592,20 +592,7 @@ config IRQ_PER_CPU source "arch/ia64/hp/sim/Kconfig" -menu "Instrumentation Support" - -source "arch/ia64/oprofile/Kconfig" - -config KPROBES - bool "Kprobes" - depends on KALLSYMS && MODULES - help - Kprobes allows you to trap at almost any kernel address and - execute a callback function. register_kprobe() establishes - a probepoint and specifies the callback. Kprobes is useful - for kernel debugging, non-intrusive instrumentation and testing. - If in doubt, say "N". 
-endmenu +source "kernel/Kconfig.instrumentation" source "arch/ia64/Kconfig.debug" diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig index 449d3e7..75fd90d 100644 --- a/arch/ia64/configs/sn2_defconfig +++ b/arch/ia64/configs/sn2_defconfig @@ -26,6 +26,7 @@ CONFIG_TASK_IO_ACCOUNTING=y # CONFIG_AUDIT is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=20 +CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_SYSFS_DEPRECATED=y CONFIG_RELAY=y diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index a3405b3..d025a22 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -773,7 +773,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro if (flags & MAP_SHARED) printk(KERN_INFO "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n", - current->comm, current->pid, start); + current->comm, task_pid_nr(current), start); ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags, off); if (IS_ERR((void *) ret)) @@ -786,7 +786,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro if (flags & MAP_SHARED) printk(KERN_INFO "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n", - current->comm, current->pid, end); + current->comm, task_pid_nr(current), end); ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags, (off + len) - offset_in_page(end)); if (IS_ERR((void *) ret)) @@ -816,7 +816,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro if ((flags & MAP_SHARED) && !is_congruent) printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap " - "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off); + "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off); DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend, is_congruent ? "congruent" : "not congruent", poff); diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 73ca86d..8e4894b 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -967,7 +967,7 @@ find_memmap_space (void) * to use. We can allocate partial granules only if the unavailable * parts exist, and are WB. 
*/ -void +unsigned long efi_memmap_init(unsigned long *s, unsigned long *e) { struct kern_memdesc *k, *prev = NULL; @@ -1084,6 +1084,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e) /* reserve the memory we are using for kern_memmap */ *s = (u64)kern_memmap; *e = (u64)++k; + + return total_mem; } void diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f55fa07..59169bf7 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -158,14 +158,14 @@ */ #define PROTECT_CTX(c, f) \ do { \ - DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \ + DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \ spin_lock_irqsave(&(c)->ctx_lock, f); \ - DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \ + DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \ } while(0) #define UNPROTECT_CTX(c, f) \ do { \ - DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \ + DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \ spin_unlock_irqrestore(&(c)->ctx_lock, f); \ } while(0) @@ -227,12 +227,12 @@ #ifdef PFM_DEBUGGING #define DPRINT(a) \ do { \ - if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \ + if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ } while (0) #define DPRINT_ovfl(a) \ do { \ - if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \ + if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ } while (0) #endif @@ -913,7 +913,7 @@ pfm_mask_monitoring(struct task_struct *task) unsigned long mask, val, ovfl_mask; int i; - DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid)); + DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task))); ovfl_mask = pmu_conf->ovfl_val; /* @@ -992,12 +992,12 @@ pfm_restore_monitoring(struct task_struct *task) ovfl_mask = pmu_conf->ovfl_val; if (task != current) { - printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid); + printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current)); return; } if (ctx->ctx_state != PFM_CTX_MASKED) { printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__, - task->pid, current->pid, ctx->ctx_state); + task_pid_nr(task), task_pid_nr(current), ctx->ctx_state); return; } psr = pfm_get_psr(); @@ -1051,7 +1051,8 @@ pfm_restore_monitoring(struct task_struct *task) if ((mask & 0x1) == 0UL) continue; ctx->th_pmcs[i] = ctx->ctx_pmcs[i]; ia64_set_pmc(i, ctx->th_pmcs[i]); - DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i])); + DPRINT(("[%d] pmc[%d]=0x%lx\n", + task_pid_nr(task), i, ctx->th_pmcs[i])); } ia64_srlz_d(); @@ -1370,7 +1371,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) error_conflict: DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n", - pfm_sessions.pfs_sys_session[cpu]->pid, + task_pid_nr(pfm_sessions.pfs_sys_session[cpu]), cpu)); abort: UNLOCK_PFS(flags); @@ -1442,7 +1443,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz /* sanity checks */ if (task->mm 
== NULL || size == 0UL || vaddr == NULL) { - printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm); + printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm); return -EINVAL; } @@ -1459,7 +1460,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz up_write(&task->mm->mmap_sem); if (r !=0) { - printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size); + printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size); } DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r)); @@ -1501,7 +1502,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx) return 0; invalid_free: - printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid); + printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current)); return -EINVAL; } #endif @@ -1547,13 +1548,13 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) unsigned long flags; DECLARE_WAITQUEUE(wait, current); if (PFM_IS_FILE(filp) == 0) { - printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid); + printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current)); return -EINVAL; } ctx = (pfm_context_t *)filp->private_data; if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid); + printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); return -EINVAL; } @@ -1607,7 +1608,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) PROTECT_CTX(ctx, flags); } - DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret)); + DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret)); set_current_state(TASK_RUNNING); remove_wait_queue(&ctx->ctx_msgq_wait, &wait); @@ -1616,7 +1617,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) ret = -EINVAL; msg = pfm_get_next_msg(ctx); if (msg == NULL) { - printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid); + printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current)); goto abort_locked; } @@ -1647,13 +1648,13 @@ pfm_poll(struct file *filp, poll_table * wait) unsigned int mask = 0; if (PFM_IS_FILE(filp) == 0) { - printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid); + printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current)); return 0; } ctx = (pfm_context_t *)filp->private_data; if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid); + printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); return 0; } @@ -1692,7 +1693,7 @@ pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on) ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue); DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n", - current->pid, + task_pid_nr(current), fd, on, ctx->ctx_async_queue, ret)); @@ -1707,13 +1708,13 @@ pfm_fasync(int fd, struct file *filp, int on) int ret; if (PFM_IS_FILE(filp) == 0) { - printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid); + printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current)); return -EBADF; } ctx = (pfm_context_t *)filp->private_data; if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid); + printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", 
task_pid_nr(current)); return -EBADF; } /* @@ -1759,7 +1760,7 @@ pfm_syswide_force_stop(void *info) if (owner != ctx->ctx_task) { printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n", smp_processor_id(), - owner->pid, ctx->ctx_task->pid); + task_pid_nr(owner), task_pid_nr(ctx->ctx_task)); return; } if (GET_PMU_CTX() != ctx) { @@ -1769,7 +1770,7 @@ pfm_syswide_force_stop(void *info) return; } - DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid)); + DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task))); /* * the context is already protected in pfm_close(), we simply * need to mask interrupts to avoid a PMU interrupt race on @@ -1821,7 +1822,7 @@ pfm_flush(struct file *filp, fl_owner_t id) ctx = (pfm_context_t *)filp->private_data; if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid); + printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); return -EBADF; } @@ -1969,7 +1970,7 @@ pfm_close(struct inode *inode, struct file *filp) ctx = (pfm_context_t *)filp->private_data; if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid); + printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); return -EBADF; } @@ -2066,7 +2067,7 @@ pfm_close(struct inode *inode, struct file *filp) */ ctx->ctx_state = PFM_CTX_ZOMBIE; - DPRINT(("zombie ctx for [%d]\n", task->pid)); + DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task))); /* * cannot free the context on the spot. deferred until * the task notices the ZOMBIE state @@ -2472,7 +2473,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t /* invoke and lock buffer format, if found */ fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id); if (fmt == NULL) { - DPRINT(("[%d] cannot find buffer format\n", task->pid)); + DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task))); return -EINVAL; } @@ -2483,7 +2484,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); - DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret)); + DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret)); if (ret) goto error; @@ -2605,23 +2606,23 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) * no kernel task or task not owner by caller */ if (task->mm == NULL) { - DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid)); + DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task))); return -EPERM; } if (pfm_bad_permissions(task)) { - DPRINT(("no permission to attach to [%d]\n", task->pid)); + DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task))); return -EPERM; } /* * cannot block in self-monitoring mode */ if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { - DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid)); + DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task))); return -EINVAL; } if (task->exit_state == EXIT_ZOMBIE) { - DPRINT(("cannot attach to zombie task [%d]\n", task->pid)); + DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task))); return -EBUSY; } @@ -2631,7 +2632,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) if (task == current) return 0; if ((task->state != TASK_STOPPED) 
&& (task->state != TASK_TRACED)) { - DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state)); + DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state)); return -EBUSY; } /* @@ -3512,7 +3513,7 @@ pfm_use_debug_registers(struct task_struct *task) if (pmu_conf->use_rr_dbregs == 0) return 0; - DPRINT(("called for [%d]\n", task->pid)); + DPRINT(("called for [%d]\n", task_pid_nr(task))); /* * do it only once @@ -3543,7 +3544,7 @@ pfm_use_debug_registers(struct task_struct *task) DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n", pfm_sessions.pfs_ptrace_use_dbregs, pfm_sessions.pfs_sys_use_dbregs, - task->pid, ret)); + task_pid_nr(task), ret)); UNLOCK_PFS(flags); @@ -3568,7 +3569,7 @@ pfm_release_debug_registers(struct task_struct *task) LOCK_PFS(flags); if (pfm_sessions.pfs_ptrace_use_dbregs == 0) { - printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid); + printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task)); ret = -1; } else { pfm_sessions.pfs_ptrace_use_dbregs--; @@ -3620,7 +3621,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) /* sanity check */ if (unlikely(task == NULL)) { - printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid); + printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current)); return -EINVAL; } @@ -3629,7 +3630,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) fmt = ctx->ctx_buf_fmt; DPRINT(("restarting self %d ovfl=0x%lx\n", - task->pid, + task_pid_nr(task), ctx->ctx_ovfl_regs[0])); if (CTX_HAS_SMPL(ctx)) { @@ -3653,11 +3654,11 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET); if (rst_ctrl.bits.mask_monitoring == 0) { - DPRINT(("resuming monitoring for [%d]\n", task->pid)); + DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task))); if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task); } else { - DPRINT(("keeping monitoring stopped for [%d]\n", task->pid)); + DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task))); // cannot use pfm_stop_monitoring(task, regs); } @@ -3714,10 +3715,10 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) * "self-monitoring". 
*/ if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { - DPRINT(("unblocking [%d] \n", task->pid)); + DPRINT(("unblocking [%d] \n", task_pid_nr(task))); complete(&ctx->ctx_restart_done); } else { - DPRINT(("[%d] armed exit trap\n", task->pid)); + DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; @@ -3805,7 +3806,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_ * don't bother if we are loaded and task is being debugged */ if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) { - DPRINT(("debug registers already in use for [%d]\n", task->pid)); + DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task))); return -EBUSY; } @@ -3846,7 +3847,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_ * is shared by all processes running on it */ if (first_time && can_access_pmu) { - DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid)); + DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task))); for (i=0; i < pmu_conf->num_ibrs; i++) { ia64_set_ibr(i, 0UL); ia64_dv_serialize_instruction(); @@ -4035,7 +4036,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) return -EBUSY; } DPRINT(("task [%d] ctx_state=%d is_system=%d\n", - PFM_CTX_TASK(ctx)->pid, + task_pid_nr(PFM_CTX_TASK(ctx)), state, is_system)); /* @@ -4093,7 +4094,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) * monitoring disabled in kernel at next reschedule */ ctx->ctx_saved_psr_up = 0; - DPRINT(("task=[%d]\n", task->pid)); + DPRINT(("task=[%d]\n", task_pid_nr(task))); } return 0; } @@ -4298,11 +4299,12 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) if (is_system) { if (pfm_sessions.pfs_ptrace_use_dbregs) { - DPRINT(("cannot load [%d] dbregs in use\n", task->pid)); + DPRINT(("cannot load [%d] dbregs in use\n", + task_pid_nr(task))); ret = -EBUSY; } else { pfm_sessions.pfs_sys_use_dbregs++; - DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs)); + DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs)); set_dbregs = 1; } } @@ -4394,7 +4396,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) /* allow user level control */ ia64_psr(regs)->sp = 0; - DPRINT(("clearing psr.sp for [%d]\n", task->pid)); + DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task))); SET_LAST_CPU(ctx, smp_processor_id()); INC_ACTIVATION(); @@ -4429,7 +4431,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) */ SET_PMU_OWNER(task, ctx); - DPRINT(("context loaded on PMU for [%d]\n", task->pid)); + DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task))); } else { /* * when not current, task MUST be stopped, so this is safe @@ -4493,7 +4495,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg int prev_state, is_system; int ret; - DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1)); + DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? 
task_pid_nr(task) : -1)); prev_state = ctx->ctx_state; is_system = ctx->ctx_fl_system; @@ -4568,7 +4570,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg */ ia64_psr(regs)->sp = 1; - DPRINT(("setting psr.sp for [%d]\n", task->pid)); + DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task))); } /* * save PMDs to context @@ -4608,7 +4610,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg ctx->ctx_fl_can_restart = 0; ctx->ctx_fl_going_zombie = 0; - DPRINT(("disconnected [%d] from context\n", task->pid)); + DPRINT(("disconnected [%d] from context\n", task_pid_nr(task))); return 0; } @@ -4631,7 +4633,7 @@ pfm_exit_thread(struct task_struct *task) PROTECT_CTX(ctx, flags); - DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid)); + DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task))); state = ctx->ctx_state; switch(state) { @@ -4640,13 +4642,13 @@ pfm_exit_thread(struct task_struct *task) * only comes to this function if pfm_context is not NULL, i.e., cannot * be in unloaded state */ - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid); + printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task)); break; case PFM_CTX_LOADED: case PFM_CTX_MASKED: ret = pfm_context_unload(ctx, NULL, 0, regs); if (ret) { - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret); + printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); } DPRINT(("ctx unloaded for current state was %d\n", state)); @@ -4655,12 +4657,12 @@ pfm_exit_thread(struct task_struct *task) case PFM_CTX_ZOMBIE: ret = pfm_context_unload(ctx, NULL, 0, regs); if (ret) { - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret); + printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); } free_ok = 1; break; default: - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state); + printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state); break; } UNPROTECT_CTX(ctx, flags); @@ -4744,7 +4746,7 @@ recheck: DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n", ctx->ctx_fd, state, - task->pid, + task_pid_nr(task), task->state, PFM_CMD_STOPPED(cmd))); /* @@ -4791,7 +4793,7 @@ recheck: */ if (PFM_CMD_STOPPED(cmd)) { if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) { - DPRINT(("[%d] task not in stopped state\n", task->pid)); + DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task))); return -EBUSY; } /* @@ -4884,7 +4886,7 @@ restart_args: * limit abuse to min page size */ if (unlikely(sz > PFM_MAX_ARGSIZE)) { - printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz); + printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz); return -E2BIG; } @@ -5031,11 +5033,11 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) { int ret; - DPRINT(("entering for [%d]\n", current->pid)); + DPRINT(("entering for [%d]\n", task_pid_nr(current))); ret = pfm_context_unload(ctx, NULL, 0, regs); if (ret) { - printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", current->pid, ret); + printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret); } /* @@ -5072,7 +5074,7 @@ pfm_handle_work(void) ctx = 
PFM_GET_CTX(current); if (ctx == NULL) { - printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid); + printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current)); return; } @@ -5269,7 +5271,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s " "used_pmds=0x%lx\n", pmc0, - task ? task->pid: -1, + task ? task_pid_nr(task): -1, (regs ? regs->cr_iip : 0), CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking", ctx->ctx_used_pmds[0])); @@ -5458,7 +5460,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str } DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n", - GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1, + GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1, PFM_GET_WORK_PENDING(task), ctx->ctx_fl_trap_reason, ovfl_pmds, @@ -5483,7 +5485,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str sanity_check: printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n", smp_processor_id(), - task ? task->pid : -1, + task ? task_pid_nr(task) : -1, pmc0); return; @@ -5516,7 +5518,7 @@ stop_monitoring: * * Overall pretty hairy stuff.... */ - DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1)); + DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1)); pfm_clear_psr_up(); ia64_psr(regs)->up = 0; ia64_psr(regs)->sp = 1; @@ -5577,13 +5579,13 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs) report_spurious1: printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n", - this_cpu, task->pid); + this_cpu, task_pid_nr(task)); pfm_unfreeze_pmu(); return -1; report_spurious2: printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n", this_cpu, - task->pid); + task_pid_nr(task)); pfm_unfreeze_pmu(); return -1; } @@ -5870,7 +5872,8 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs) ia64_psr(regs)->sp = 1; if (GET_PMU_OWNER() == task) { - DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid)); + DPRINT(("cleared ownership for [%d]\n", + task_pid_nr(ctx->ctx_task))); SET_PMU_OWNER(NULL, NULL); } @@ -5882,7 +5885,7 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs) task->thread.pfm_context = NULL; task->thread.flags &= ~IA64_THREAD_PM_VALID; - DPRINT(("force cleanup for [%d]\n", task->pid)); + DPRINT(("force cleanup for [%d]\n", task_pid_nr(task))); } @@ -6426,7 +6429,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) if (PMD_IS_COUNTING(i)) { DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n", - task->pid, + task_pid_nr(task), i, ctx->ctx_pmds[i].val, val & ovfl_val)); @@ -6448,11 +6451,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) */ if (pmc0 & (1UL << i)) { val += 1 + ovfl_val; - DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i)); + DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i)); } } - DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val)); + DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val)); if (is_self) ctx->th_pmds[i] = pmd_val; @@ -6793,14 +6796,14 @@ dump_pmu_state(const char *from) printk("CPU%d from %s() current [%d] iip=0x%lx %s\n", this_cpu, from, - current->pid, + task_pid_nr(current), regs->cr_iip, current->comm); task = GET_PMU_OWNER(); ctx = GET_PMU_CTX(); - printk("->CPU%d owner [%d] 
ctx=%p\n", this_cpu, task ? task->pid : -1, ctx); + printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx); psr = pfm_get_psr(); @@ -6848,7 +6851,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs) { struct thread_struct *thread; - DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid)); + DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task))); thread = &task->thread; diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c index ff80eab..a7af1cb 100644 --- a/arch/ia64/kernel/perfmon_default_smpl.c +++ b/arch/ia64/kernel/perfmon_default_smpl.c @@ -44,11 +44,11 @@ default_validate(struct task_struct *task, unsigned int flags, int cpu, void *da int ret = 0; if (data == NULL) { - DPRINT(("[%d] no argument passed\n", task->pid)); + DPRINT(("[%d] no argument passed\n", task_pid_nr(task))); return -EINVAL; } - DPRINT(("[%d] validate flags=0x%x CPU%d\n", task->pid, flags, cpu)); + DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu)); /* * must hold at least the buffer header + one minimally sized entry @@ -88,7 +88,7 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v hdr->hdr_count = 0UL; DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n", - task->pid, + task_pid_nr(task), buf, hdr->hdr_buf_size, sizeof(*hdr), @@ -245,7 +245,7 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru static int default_exit(struct task_struct *task, void *buf, struct pt_regs *regs) { - DPRINT(("[%d] exit(%p)\n", task->pid, buf)); + DPRINT(("[%d] exit(%p)\n", task_pid_nr(task), buf)); return 0; } diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index c613fc0..2418289 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -105,7 +105,8 @@ show_regs (struct pt_regs *regs) unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; print_modules(); - printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm); + printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), + smp_processor_id(), current->comm); printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n", regs->cr_ipsr, regs->cr_ifs, ip, print_tainted()); print_symbol("ip is at %s\n", ip); diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index c5cfcfa..cbf67f1 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -208,6 +208,48 @@ static int __init register_memory(void) __initcall(register_memory); + +#ifdef CONFIG_KEXEC +static void __init setup_crashkernel(unsigned long total, int *n) +{ + unsigned long long base = 0, size = 0; + int ret; + + ret = parse_crashkernel(boot_command_line, total, + &size, &base); + if (ret == 0 && size > 0) { + if (!base) { + sort_regions(rsvd_region, *n); + base = kdump_find_rsvd_region(size, + rsvd_region, *n); + } + if (base != ~0UL) { + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(size >> 20), + (unsigned long)(base >> 20), + (unsigned long)(total >> 20)); + rsvd_region[*n].start = + (unsigned long)__va(base); + rsvd_region[*n].end = + (unsigned long)__va(base + size); + (*n)++; + crashk_res.start = base; + crashk_res.end = base + size - 1; + } + } + efi_memmap_res.start = ia64_boot_param->efi_memmap; + efi_memmap_res.end = efi_memmap_res.start + + ia64_boot_param->efi_memmap_size; + boot_param_res.start = 
__pa(ia64_boot_param); + boot_param_res.end = boot_param_res.start + + sizeof(*ia64_boot_param); +} +#else +static inline void __init setup_crashkernel(unsigned long total, int *n) +{} +#endif + /** * reserve_memory - setup reserved memory areas * @@ -219,6 +261,7 @@ void __init reserve_memory (void) { int n = 0; + unsigned long total_memory; /* * none of the entries in this table overlap @@ -254,50 +297,11 @@ reserve_memory (void) n++; #endif - efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); + total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); n++; -#ifdef CONFIG_KEXEC - /* crashkernel=size@offset specifies the size to reserve for a crash - * kernel. If offset is 0, then it is determined automatically. - * By reserving this memory we guarantee that linux never set's it - * up as a DMA target.Useful for holding code to do something - * appropriate after a kernel panic. - */ - { - char *from = strstr(boot_command_line, "crashkernel="); - unsigned long base, size; - if (from) { - size = memparse(from + 12, &from); - if (*from == '@') - base = memparse(from+1, &from); - else - base = 0; - if (size) { - if (!base) { - sort_regions(rsvd_region, n); - base = kdump_find_rsvd_region(size, - rsvd_region, n); - } - if (base != ~0UL) { - rsvd_region[n].start = - (unsigned long)__va(base); - rsvd_region[n].end = - (unsigned long)__va(base + size); - n++; - crashk_res.start = base; - crashk_res.end = base + size - 1; - } - } - } - efi_memmap_res.start = ia64_boot_param->efi_memmap; - efi_memmap_res.end = efi_memmap_res.start + - ia64_boot_param->efi_memmap_size; - boot_param_res.start = __pa(ia64_boot_param); - boot_param_res.end = boot_param_res.start + - sizeof(*ia64_boot_param); - } -#endif + setup_crashkernel(total_memory, &n); + /* end of memory marker */ rsvd_region[n].start = ~0UL; rsvd_region[n].end = ~0UL; diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index aeec818..cdb64cc 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -227,7 +227,7 @@ ia64_rt_sigreturn (struct sigscratch *scr) si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; - si.si_pid = current->pid; + si.si_pid = task_pid_vnr(current); si.si_uid = current->uid; si.si_addr = sc; force_sig_info(SIGSEGV, &si, current); @@ -332,7 +332,7 @@ force_sigsegv_info (int sig, void __user *addr) si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; - si.si_pid = current->pid; + si.si_pid = task_pid_vnr(current); si.si_uid = current->uid; si.si_addr = addr; force_sig_info(SIGSEGV, &si, current); diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 98cfc90..2bb8421 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -371,6 +371,11 @@ ia64_setup_printk_clock(void) ia64_printk_clock = ia64_itc_printk_clock; } +/* IA64 doesn't cache the timezone */ +void update_vsyscall_tz(void) +{ +} + void update_vsyscall(struct timespec *wall, struct clocksource *c) { unsigned long flags; diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 3aeaf15..78d65cb 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -61,7 +61,7 @@ die (const char *str, struct pt_regs *regs, long err) if (++die.lock_owner_depth < 3) { printk("%s[%d]: %s %ld [%d]\n", - current->comm, current->pid, str, err, ++die_counter); + current->comm, task_pid_nr(current), str, err, ++die_counter); (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV); show_regs(regs); } else @@ -315,7 +315,7 @@ handle_fpu_swa (int 
fp_fault, struct pt_regs *regs, unsigned long isr) last.time = current_jiffies + 5 * HZ; printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n", - current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr); + current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr); } } } @@ -453,7 +453,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, if (code == 8) { # ifdef CONFIG_IA64_PRINT_HAZARDS printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), regs.cr_iip + ia64_psr(&regs)->ri, regs.pr); # endif return; diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index fe6aa5a..2173de9 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -1340,7 +1340,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) size_t len; len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, " - "ip=0x%016lx\n\r", current->comm, current->pid, + "ip=0x%016lx\n\r", current->comm, + task_pid_nr(current), ifa, regs->cr_iip + ipsr->ri); /* * Don't call tty_write_message() if we're in the kernel; we might @@ -1363,7 +1364,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) "administrator\n" "echo 0 > /proc/sys/kernel/ignore-" "unaligned-usertrap to re-enable\n", - current->comm, current->pid); + current->comm, task_pid_nr(current)); } } } else { diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 32f2625..7571076 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -274,7 +274,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 3e10152..c6c19bf 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -127,8 +127,8 @@ ia64_init_addr_space (void) vma->vm_mm = current->mm; vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; - vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7]; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); down_write(&current->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { up_write(&current->mm->mmap_sem); diff --git a/arch/ia64/oprofile/Kconfig b/arch/ia64/oprofile/Kconfig deleted file mode 100644 index 97271ab..0000000 --- a/arch/ia64/oprofile/Kconfig +++ /dev/null @@ -1,20 +0,0 @@ -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - Due to firmware bugs, you may need to use the "nohalt" boot - option if you're using OProfile with the hardware performance - counters. - - If unsure, say N. 
- diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c index e58fcad..a5df672 100644 --- a/arch/ia64/sn/kernel/xpnet.c +++ b/arch/ia64/sn/kernel/xpnet.c @@ -269,8 +269,9 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) skb->protocol = eth_type_trans(skb, xpnet_device); skb->ip_summed = CHECKSUM_UNNECESSARY; - dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " - "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", + dev_dbg(xpnet, "passing skb to network layer\n" + KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len); @@ -576,10 +577,10 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); msg->buf_pa = __pa(start_addr); - dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa=" - "0x%lx, msg->size=%u, msg->leadin_ignore=%u, " - "msg->tailout_ignore=%u\n", dest_partid, - XPC_NET_CHANNEL, msg->buf_pa, msg->size, + dev_dbg(xpnet, "sending XPC message to %d:%d\n" + KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " + "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", + dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, msg->leadin_ignore, msg->tailout_ignore); diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index bd5fe76..ab9a264 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -426,7 +426,7 @@ source "drivers/Kconfig" source "fs/Kconfig" -source "arch/m32r/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" source "arch/m32r/Kconfig.debug" diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index f8d8650..d0c5b0b 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -71,7 +71,7 @@ skip: } /* - * do_IRQ handles all normal device IRQ's (the special + * do_IRQ handles all normal device IRQs (the special * SMP cross-CPU interrupts have their own specific * handlers). */ diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 916faf6..a753d79 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c @@ -358,7 +358,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - /* Reenable any watchpoints before delivering the + /* Re-enable any watchpoints before delivering the * signal to user space. The processor register will * have been cleared if the watchpoint triggered * inside the kernel. diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index 3601291..c837bc13 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -202,7 +202,7 @@ void smp_flush_cache_all_interrupt(void) } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* TLB flush request Routins */ +/* TLB flush request Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* @@ -378,7 +378,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) * Name: flush_tlb_others * * Description: This routine requests other CPU to execute flush TLB. - * 1.Setup parmeters. + * 1.Setup parameters. * 2.Send 'INVALIDATE_TLB_IPI' to other CPU. * Request other CPU to execute 'smp_invalidate_interrupt()'. * 3.Wait for other CPUs operation finished. 
@@ -502,7 +502,7 @@ void smp_invalidate_interrupt(void) } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* Stop CPU request Routins */ +/* Stop CPU request Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* @@ -566,7 +566,7 @@ static void stop_this_cpu(void *dummy) } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* Call function Routins */ +/* Call function Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* @@ -690,7 +690,7 @@ void smp_call_function_interrupt(void) } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* Timer Routins */ +/* Timer Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* @@ -802,7 +802,7 @@ void smp_local_timer_interrupt(void) } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* Send IPI Routins */ +/* Send IPI Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* @@ -814,7 +814,7 @@ void smp_local_timer_interrupt(void) * * Arguments: ipi_num - Number of IPI * try - 0 : Send IPI certainly. - * !0 : The following IPI is not sended when Target CPU + * !0 : The following IPI is not sent when Target CPU * has not received the before IPI. * * Returns: void (cannot fail) @@ -844,7 +844,7 @@ void send_IPI_allbutself(int ipi_num, int try) * Arguments: cpu_mask - Bitmap of target CPUs logical ID * ipi_num - Number of IPI * try - 0 : Send IPI certainly. - * !0 : The following IPI is not sended when Target CPU + * !0 : The following IPI is not sent when Target CPU * has not received the before IPI. * * Returns: void (cannot fail) @@ -885,7 +885,7 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try) * Arguments: cpu_mask - Bitmap of target CPUs physical ID * ipi_num - Number of IPI * try - 0 : Send IPI certainly. - * !0 : The following IPI is not sended when Target CPU + * !0 : The following IPI is not sent when Target CPU * has not received the before IPI. * * Returns: IPICRi regster value. 
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index 9dae410..0e383da 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c @@ -133,7 +133,7 @@ static void map_cpu_to_physid(int, int); static void unmap_cpu_to_physid(int, int); /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* Boot up APs Routins : BSP */ +/* Boot up APs Routines : BSP */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ void __devinit smp_prepare_boot_cpu(void) { @@ -404,7 +404,7 @@ void __init smp_cpus_done(unsigned int max_cpus) } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* Activate a secondary processor Routins */ +/* Activate a secondary processor Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* @@ -509,7 +509,7 @@ static void __init smp_online(void) } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -/* Boot up CPUs common Routins */ +/* Boot up CPUs common Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ static void __init show_mp_info(int nr_cpu) { diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c index 0fc2efe..6d7a80f 100644 --- a/arch/m32r/kernel/sys_m32r.c +++ b/arch/m32r/kernel/sys_m32r.c @@ -214,7 +214,7 @@ asmlinkage int sys_uname(struct old_utsname __user * name) asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) { - /* This should flush more selectivly ... */ + /* This should flush more selectively ... */ _flush_cache_all(); return 0; } diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 97e0b1c..89ba4a0 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -196,7 +196,7 @@ static void show_registers(struct pt_regs *regs) printk("SPI: %08lx\n", sp); } printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)", - current->comm, current->pid, 0xffff & i, 4096+(unsigned long)current); + current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current); /* * When in-kernel, we also print out the stack and code at the diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index 70a766a..4a71df4 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -271,7 +271,7 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(tsk)) { + if (is_global_init(tsk)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/m32r/oprofile/Kconfig b/arch/m32r/oprofile/Kconfig deleted file mode 100644 index 19d3773..0000000 --- a/arch/m32r/oprofile/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ - -menu "Profiling support" - depends on EXPERIMENTAL - -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. 
- -endmenu - diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 20a9c08..01dee84 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -683,6 +683,8 @@ endmenu source "fs/Kconfig" +source "kernel/Kconfig.instrumentation" + source "arch/m68k/Kconfig.debug" source "security/Kconfig" diff --git a/arch/m68k/amiga/pcmcia.c b/arch/m68k/amiga/pcmcia.c index fc57c6e..186662c 100644 --- a/arch/m68k/amiga/pcmcia.c +++ b/arch/m68k/amiga/pcmcia.c @@ -33,7 +33,7 @@ void pcmcia_reset(void) /* copy a tuple, including tuple header. return nb bytes copied */ -/* be carefull as this may trigger a GAYLE_IRQ_WR interrupt ! */ +/* be careful as this may trigger a GAYLE_IRQ_WR interrupt ! */ int pcmcia_copy_tuple(unsigned char tuple_id, void *tuple, int max_len) { diff --git a/arch/m68k/ifpsp060/CHANGES b/arch/m68k/ifpsp060/CHANGES index c1e712d..ba96596 100644 --- a/arch/m68k/ifpsp060/CHANGES +++ b/arch/m68k/ifpsp060/CHANGES @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/MISC b/arch/m68k/ifpsp060/MISC index b7e644b..1a63913 100644 --- a/arch/m68k/ifpsp060/MISC +++ b/arch/m68k/ifpsp060/MISC @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/README b/arch/m68k/ifpsp060/README index e3bced4..f6f8f5c 100644 --- a/arch/m68k/ifpsp060/README +++ b/arch/m68k/ifpsp060/README @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/TEST.DOC b/arch/m68k/ifpsp060/TEST.DOC index 5e5900c..1ba3aef 100644 --- a/arch/m68k/ifpsp060/TEST.DOC +++ b/arch/m68k/ifpsp060/TEST.DOC @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/fplsp.doc b/arch/m68k/ifpsp060/fplsp.doc index fb637c4..89730a9 100644 --- a/arch/m68k/ifpsp060/fplsp.doc +++ b/arch/m68k/ifpsp060/fplsp.doc @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. 
THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/fpsp.doc b/arch/m68k/ifpsp060/fpsp.doc index 4083152..23d513f 100644 --- a/arch/m68k/ifpsp060/fpsp.doc +++ b/arch/m68k/ifpsp060/fpsp.doc @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/fskeleton.S b/arch/m68k/ifpsp060/fskeleton.S index a45a4ff..0a1ae4f 100644 --- a/arch/m68k/ifpsp060/fskeleton.S +++ b/arch/m68k/ifpsp060/fskeleton.S @@ -4,7 +4,7 @@ |M68060 Software Package |Production Release P1.00 -- October 10, 1994 | -|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. | |THE SOFTWARE is provided on an "AS IS" basis and without warranty. |To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/ilsp.doc b/arch/m68k/ifpsp060/ilsp.doc index f6fae6d..4e6292f 100644 --- a/arch/m68k/ifpsp060/ilsp.doc +++ b/arch/m68k/ifpsp060/ilsp.doc @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/iskeleton.S b/arch/m68k/ifpsp060/iskeleton.S index b2dbdf5..91a9c65 100644 --- a/arch/m68k/ifpsp060/iskeleton.S +++ b/arch/m68k/ifpsp060/iskeleton.S @@ -4,7 +4,7 @@ |M68060 Software Package |Production Release P1.00 -- October 10, 1994 | -|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. | |THE SOFTWARE is provided on an "AS IS" basis and without warranty. |To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/isp.doc b/arch/m68k/ifpsp060/isp.doc index 5a90fde..9dadd72 100644 --- a/arch/m68k/ifpsp060/isp.doc +++ b/arch/m68k/ifpsp060/isp.doc @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/os.S b/arch/m68k/ifpsp060/os.S index aa4df87..7a0d6e4 100644 --- a/arch/m68k/ifpsp060/os.S +++ b/arch/m68k/ifpsp060/os.S @@ -4,7 +4,7 @@ |M68060 Software Package |Production Release P1.00 -- October 10, 1994 | -|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. | |THE SOFTWARE is provided on an "AS IS" basis and without warranty. 
|To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/src/fplsp.S b/arch/m68k/ifpsp060/src/fplsp.S index fdb79b9..3b7ea2d 100644 --- a/arch/m68k/ifpsp060/src/fplsp.S +++ b/arch/m68k/ifpsp060/src/fplsp.S @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S index 3b597a9..6c1a9a2 100644 --- a/arch/m68k/ifpsp060/src/fpsp.S +++ b/arch/m68k/ifpsp060/src/fpsp.S @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/src/ftest.S b/arch/m68k/ifpsp060/src/ftest.S index 2edcbae..1f94791 100644 --- a/arch/m68k/ifpsp060/src/ftest.S +++ b/arch/m68k/ifpsp060/src/ftest.S @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/src/ilsp.S b/arch/m68k/ifpsp060/src/ilsp.S index afa7422..970abaf 100644 --- a/arch/m68k/ifpsp060/src/ilsp.S +++ b/arch/m68k/ifpsp060/src/ilsp.S @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S index b269091..6dccda7 100644 --- a/arch/m68k/ifpsp060/src/isp.S +++ b/arch/m68k/ifpsp060/src/isp.S @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/src/itest.S b/arch/m68k/ifpsp060/src/itest.S index ba4a30c..beca47e 100644 --- a/arch/m68k/ifpsp060/src/itest.S +++ b/arch/m68k/ifpsp060/src/itest.S @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. 
THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/ifpsp060/src/pfpsp.S b/arch/m68k/ifpsp060/src/pfpsp.S index 0c997c4..51b9f7d 100644 --- a/arch/m68k/ifpsp060/src/pfpsp.S +++ b/arch/m68k/ifpsp060/src/pfpsp.S @@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division M68060 Software Package Production Release P1.00 -- October 10, 1994 -M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. +M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved. THE SOFTWARE is provided on an "AS IS" basis and without warranty. To the maximum extent permitted by applicable law, diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 4e2752a..97f556f 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -900,7 +900,7 @@ void show_registers(struct pt_regs *regs) regs->d4, regs->d5, regs->a0, regs->a1); printk("Process %s (pid: %d, task=%p)\n", - current->comm, current->pid, current); + current->comm, task_pid_nr(current), current); addr = (unsigned long)&fp->un; printk("Frame format=%X ", regs->format); switch (regs->format) { @@ -1038,7 +1038,7 @@ void bad_super_trap (struct frame *fp) fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); } - printk ("Current process id is %d\n", current->pid); + printk ("Current process id is %d\n", task_pid_nr(current)); die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); } diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 8547dbc..01b468b 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -284,7 +284,7 @@ static struct mac_model mac_data_table[] = { }, /* - * Weirdified MacII hardware - all subtley different. Gee thanks + * Weirdified MacII hardware - all subtly different. Gee thanks * Apple. All these boxes seem to have VIA2 in a different place to * the MacII (+1A000 rather than +4000) * CSA: see http://developer.apple.com/technotes/hw/hw_09.html @@ -707,7 +707,7 @@ static struct mac_model mac_data_table[] = { * All of these probably have onboard SONIC in the Dock which * means we'll have to probe for it eventually. * - * Are these reallly MAC_VIA_IIci? The developer notes for the + * Are these really MAC_VIA_IIci? The developer notes for the * Duos show pretty much the same custom parts as in most of * the other PowerBooks which would imply MAC_VIA_QUADRA. */ diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 0cea21f..5b2799e 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -100,7 +100,7 @@ * finished; this function moves the message state to MSG_COMPLETE and signals * the IOP. This two-step process is provided to allow the handler to defer * message processing to a bottom-half handler if the processing will take - * a signifigant amount of time (handlers are called at interrupt time so they + * a significant amount of time (handlers are called at interrupt time so they * should execute quickly.) */ @@ -120,7 +120,7 @@ /*#define DEBUG_IOP*/ -/* Set to nonezero if the IOPs are present. Set by iop_init() */ +/* Set to non-zero if the IOPs are present. Set by iop_init() */ int iop_scc_present,iop_ism_present; diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index d7be169..50603d3 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c @@ -8,7 +8,7 @@ * * 990502 (jmt) - Major rewrite for new interrupt architecture as well as some * recent insights into OSS operational details. 
- * 990610 (jmt) - Now taking fulll advantage of the OSS. Interrupts are mapped + * 990610 (jmt) - Now taking full advantage of the OSS. Interrupts are mapped * to mostly match the A/UX interrupt scheme supported on the * VIA side. Also added support for enabling the ISM irq again * since we now have a functional IOP manager. diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index d5cac72..8df270e 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -1,7 +1,7 @@ /* * 6522 Versatile Interface Adapter (VIA) * - * There are two of these on the Mac II. Some IRQ's are vectored + * There are two of these on the Mac II. Some IRQs are vectored * via them as are assorted bits and bobs - eg RTC, ADB. * * CSA: Motorola seems to have removed documentation on the 6522 from diff --git a/arch/m68k/math-emu/fp_log.c b/arch/m68k/math-emu/fp_log.c index 87b4f01..b1033ae 100644 --- a/arch/m68k/math-emu/fp_log.c +++ b/arch/m68k/math-emu/fp_log.c @@ -65,7 +65,7 @@ fp_fsqrt(struct fp_ext *dest, struct fp_ext *src) fp_copy_ext(&src2, dest); /* - * The taylor row arround a for sqrt(x) is: + * The taylor row around a for sqrt(x) is: * sqrt(x) = sqrt(a) + 1/(2*sqrt(a))*(x-a) + R * With a=1 this gives: * sqrt(x) = 1 + 1/2*(x-1) diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index eaa6186..f493f03 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -180,7 +180,7 @@ good_area: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c index ad3ed1f..46161ce 100644 --- a/arch/m68k/q40/q40ints.c +++ b/arch/m68k/q40/q40ints.c @@ -184,7 +184,7 @@ static struct IRQ_TABLE eirqs[] = { }; /* complain only this many times about spurious ints : */ -static int ccleirq=60; /* ISA dev IRQ's*/ +static int ccleirq=60; /* ISA dev IRQs*/ /*static int cclirq=60;*/ /* internal */ /* FIXME: add shared ints,mask,unmask,probing.... */ @@ -234,7 +234,7 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp) * There is a little mess wrt which IRQ really caused this irq request. The * main problem is that IIRQ_REG and EIRQ_REG reflect the state when they * are read - which is long after the request came in. In theory IRQs should - * not just go away but they occassionally do + * not just go away but they occasionally do */ if (irq > 4 && irq <= 15 && mext_disabled) { /*aliased_irq++;*/ diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index 7a0e3a2..fb0f6a2 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -239,7 +239,7 @@ void clear_context(unsigned long context) /* gets an empty context. if full, kills the next context listed to die first */ /* This context invalidation scheme is, well, totally arbitrary, I'm - sure it could be much more intellegent... but it gets the job done + sure it could be much more intelligent... but it gets the job done for now without much overhead in making it's decision. */ /* todo: come up with optimized scheme for flushing contexts */ unsigned long get_free_context(struct mm_struct *mm) diff --git a/arch/m68k/tools/amiga/dmesg.c b/arch/m68k/tools/amiga/dmesg.c index e892748..7340f5b 100644 --- a/arch/m68k/tools/amiga/dmesg.c +++ b/arch/m68k/tools/amiga/dmesg.c @@ -3,7 +3,7 @@ * in Chip RAM with the kernel command * line option `debug=mem'. 
* - * © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org> + * © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org> * * * Usage: diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index 185906b..f52c627 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig @@ -696,6 +696,8 @@ source "drivers/Kconfig" source "fs/Kconfig" +source "kernel/Kconfig.instrumentation" + source "arch/m68knommu/Kconfig.debug" source "security/Kconfig" diff --git a/arch/m68knommu/platform/5307/pit.c b/arch/m68knommu/platform/5307/pit.c index e53c446..f18352f 100644 --- a/arch/m68knommu/platform/5307/pit.c +++ b/arch/m68knommu/platform/5307/pit.c @@ -83,7 +83,7 @@ unsigned long coldfire_pit_offset(void) /* * If we are still in the first half of the upcount and a - * timer interupt is pending, then add on a ticks worth of time. + * timer interrupt is pending, then add on a ticks worth of time. */ offset = ((pmr - pcntr) * (1000000 / HZ)) / pmr; if ((offset < (1000000 / HZ / 2)) && (*ipr & MCFPIT_IMR_IBIT)) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 235d451..3ecff5e 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -21,6 +21,7 @@ config MACH_ALCHEMY config BASLER_EXCITE bool "Basler eXcite smart camera" + select CEVT_R4K select DMA_COHERENT select HW_HAS_PCI select IRQ_CPU @@ -47,6 +48,7 @@ config BASLER_EXCITE_PROTOTYPE config BCM47XX bool "BCM47XX based boards" + select CEVT_R4K select DMA_NONCOHERENT select HW_HAS_PCI select IRQ_CPU @@ -63,6 +65,7 @@ config BCM47XX config MIPS_COBALT bool "Cobalt Server" + select CEVT_R4K select DMA_NONCOHERENT select HW_HAS_PCI select I8253 @@ -80,6 +83,7 @@ config MIPS_COBALT config MACH_DECSTATION bool "DECstations" select BOOT_ELF32 + select CEVT_R4K select DMA_NONCOHERENT select NO_IOPORT select IRQ_CPU @@ -111,6 +115,7 @@ config MACH_JAZZ select ARC select ARC32 select ARCH_MAY_HAVE_PC_FDC + select CEVT_R4K select GENERIC_ISA_DMA select IRQ_CPU select I8253 @@ -130,6 +135,7 @@ config MACH_JAZZ config LASAT bool "LASAT Networks platforms" + select CEVT_R4K select DMA_NONCOHERENT select SYS_HAS_EARLY_PRINTK select HW_HAS_PCI @@ -146,6 +152,7 @@ config LASAT config LEMOTE_FULONG bool "Lemote Fulong mini-PC" select ARCH_SPARSEMEM_ENABLE + select CEVT_R4K select SYS_HAS_CPU_LOONGSON2 select DMA_NONCOHERENT select BOOT_ELF32 @@ -170,6 +177,7 @@ config LEMOTE_FULONG config MIPS_ATLAS bool "MIPS Atlas board" select BOOT_ELF32 + select CEVT_R4K select DMA_NONCOHERENT select SYS_HAS_EARLY_PRINTK select IRQ_CPU @@ -200,6 +208,7 @@ config MIPS_MALTA bool "MIPS Malta board" select ARCH_MAY_HAVE_PC_FDC select BOOT_ELF32 + select CEVT_R4K select DMA_NONCOHERENT select GENERIC_ISA_DMA select IRQ_CPU @@ -230,6 +239,7 @@ config MIPS_MALTA config MIPS_SEAD bool "MIPS SEAD board" + select CEVT_R4K select IRQ_CPU select DMA_NONCOHERENT select SYS_HAS_EARLY_PRINTK @@ -248,6 +258,7 @@ config MIPS_SEAD config MIPS_SIM bool 'MIPS simulator (MIPSsim)' + select CEVT_R4K select DMA_NONCOHERENT select SYS_HAS_EARLY_PRINTK select IRQ_CPU @@ -265,6 +276,7 @@ config MIPS_SIM config MARKEINS bool "NEC EMMA2RH Mark-eins" + select CEVT_R4K select DMA_NONCOHERENT select HW_HAS_PCI select IRQ_CPU @@ -279,6 +291,7 @@ config MARKEINS config MACH_VR41XX bool "NEC VR4100 series based machines" + select CEVT_R4K select SYS_HAS_CPU_VR41XX select GENERIC_HARDIRQS_NO__DO_IRQ @@ -315,6 +328,7 @@ config PMC_MSP config PMC_YOSEMITE bool "PMC-Sierra Yosemite eval board" + select CEVT_R4K select DMA_COHERENT select HW_HAS_PCI select IRQ_CPU @@ -335,6 +349,7 @@ config 
PMC_YOSEMITE config QEMU bool "Qemu" + select CEVT_R4K select DMA_COHERENT select GENERIC_ISA_DMA select HAVE_STD_PC_SERIAL_PORT @@ -365,6 +380,7 @@ config SGI_IP22 select ARC select ARC32 select BOOT_ELF32 + select CEVT_R4K select DMA_NONCOHERENT select HW_HAS_EISA select I8253 @@ -409,6 +425,7 @@ config SGI_IP32 select ARC select ARC32 select BOOT_ELF32 + select CEVT_R4K select DMA_NONCOHERENT select HW_HAS_PCI select IRQ_CPU @@ -536,6 +553,7 @@ config SNI_RM select ARC32 if CPU_LITTLE_ENDIAN select ARCH_MAY_HAVE_PC_FDC select BOOT_ELF32 + select CEVT_R4K select DMA_NONCOHERENT select GENERIC_ISA_DMA select HW_HAS_EISA @@ -577,6 +595,7 @@ config TOSHIBA_JMR3927 config TOSHIBA_RBTX4927 bool "Toshiba RBTX49[23]7 board" + select CEVT_R4K select DMA_NONCOHERENT select HAS_TXX9_SERIAL select HW_HAS_PCI @@ -597,6 +616,7 @@ config TOSHIBA_RBTX4927 config TOSHIBA_RBTX4938 bool "Toshiba RBTX4938 board" + select CEVT_R4K select DMA_NONCOHERENT select HAS_TXX9_SERIAL select HW_HAS_PCI @@ -616,6 +636,7 @@ config TOSHIBA_RBTX4938 config WR_PPMC bool "Wind River PPMC board" + select CEVT_R4K select IRQ_CPU select BOOT_ELF32 select DMA_NONCOHERENT @@ -708,6 +729,9 @@ config ARCH_MAY_HAVE_PC_FDC config BOOT_RAW bool +config CEVT_R4K + bool + config CFE bool @@ -1788,7 +1812,7 @@ config KEXEC but it is independent of the system firmware. And like a reboot you can start any kernel with it, not just Linux. - The name comes from the similiarity to the exec system call. + The name comes from the similarity to the exec system call. It is an ongoing process to be certain the hardware in a machine is properly shutdown, so do not be surprised if this code does not @@ -1981,7 +2005,7 @@ source "drivers/Kconfig" source "fs/Kconfig" -source "arch/mips/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" source "arch/mips/Kconfig.debug" diff --git a/arch/mips/au1000/Kconfig b/arch/mips/au1000/Kconfig index a23d415..b36cec58 100644 --- a/arch/mips/au1000/Kconfig +++ b/arch/mips/au1000/Kconfig @@ -137,6 +137,7 @@ config SOC_AU1200 config SOC_AU1X00 bool select 64BIT_PHYS_ADDR + select CEVT_R4K select IRQ_CPU select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c index 5f48b06..bdf00e2 100644 --- a/arch/mips/au1000/pb1200/irqmap.c +++ b/arch/mips/au1000/pb1200/irqmap.c @@ -36,8 +36,8 @@ #include <linux/slab.h> #include <linux/random.h> #include <linux/delay.h> +#include <linux/bitops.h> -#include <asm/bitops.h> #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/mipsregs.h> diff --git a/arch/mips/basler/excite/excite_irq.c b/arch/mips/basler/excite/excite_irq.c index 1ecab63..4903e06 100644 --- a/arch/mips/basler/excite/excite_irq.c +++ b/arch/mips/basler/excite/excite_irq.c @@ -29,7 +29,7 @@ #include <linux/timex.h> #include <linux/slab.h> #include <linux/random.h> -#include <asm/bitops.h> +#include <linux/bitops.h> #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/arch/mips/bcm47xx/time.c b/arch/mips/bcm47xx/time.c index 0ab4676..0c6f47b 100644 --- a/arch/mips/bcm47xx/time.c +++ b/arch/mips/bcm47xx/time.c @@ -46,10 +46,3 @@ void __init plat_time_init(void) /* Set MIPS counter frequency for fixed_rate_gettimeoffset() */ mips_hpt_frequency = hz; } - -void __init -plat_timer_setup(struct irqaction *irq) -{ - /* Enable the timer interrupt */ - setup_irq(7, irq); -} diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 49bcc58..892d4c3 100644 --- 
a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -175,6 +175,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 +CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_SYSFS_DEPRECATED=y CONFIG_RELAY=y diff --git a/arch/mips/configs/mipssim_defconfig b/arch/mips/configs/mipssim_defconfig index 86dcb74..61b72f5 100644 --- a/arch/mips/configs/mipssim_defconfig +++ b/arch/mips/configs/mipssim_defconfig @@ -1,71 +1,68 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.20 -# Tue Feb 20 21:47:35 2007 +# Linux kernel version: 2.6.23 +# Thu Oct 18 22:45:52 2007 # CONFIG_MIPS=y # # Machine selection # -CONFIG_ZONE_DMA=y -# CONFIG_MIPS_MTX1 is not set -# CONFIG_MIPS_BOSPORUS is not set -# CONFIG_MIPS_PB1000 is not set -# CONFIG_MIPS_PB1100 is not set -# CONFIG_MIPS_PB1500 is not set -# CONFIG_MIPS_PB1550 is not set -# CONFIG_MIPS_PB1200 is not set -# CONFIG_MIPS_DB1000 is not set -# CONFIG_MIPS_DB1100 is not set -# CONFIG_MIPS_DB1500 is not set -# CONFIG_MIPS_DB1550 is not set -# CONFIG_MIPS_DB1200 is not set -# CONFIG_MIPS_MIRAGE is not set +# CONFIG_MACH_ALCHEMY is not set # CONFIG_BASLER_EXCITE is not set +# CONFIG_BCM47XX is not set # CONFIG_MIPS_COBALT is not set # CONFIG_MACH_DECSTATION is not set # CONFIG_MACH_JAZZ is not set +# CONFIG_LASAT is not set +# CONFIG_LEMOTE_FULONG is not set # CONFIG_MIPS_ATLAS is not set # CONFIG_MIPS_MALTA is not set # CONFIG_MIPS_SEAD is not set -# CONFIG_WR_PPMC is not set CONFIG_MIPS_SIM=y -# CONFIG_MOMENCO_JAGUAR_ATX is not set -# CONFIG_MIPS_XXS1500 is not set +# CONFIG_MARKEINS is not set +# CONFIG_MACH_VR41XX is not set # CONFIG_PNX8550_JBS is not set # CONFIG_PNX8550_STB810 is not set -# CONFIG_MACH_VR41XX is not set +# CONFIG_PMC_MSP is not set # CONFIG_PMC_YOSEMITE is not set # CONFIG_QEMU is not set -# CONFIG_MARKEINS is not set # CONFIG_SGI_IP22 is not set # CONFIG_SGI_IP27 is not set # CONFIG_SGI_IP32 is not set -# CONFIG_SIBYTE_BIGSUR is not set +# CONFIG_SIBYTE_CRHINE is not set +# CONFIG_SIBYTE_CARMEL is not set +# CONFIG_SIBYTE_CRHONE is not set +# CONFIG_SIBYTE_RHONE is not set # CONFIG_SIBYTE_SWARM is not set +# CONFIG_SIBYTE_LITTLESUR is not set # CONFIG_SIBYTE_SENTOSA is not set -# CONFIG_SIBYTE_RHONE is not set -# CONFIG_SIBYTE_CARMEL is not set # CONFIG_SIBYTE_PTSWARM is not set -# CONFIG_SIBYTE_LITTLESUR is not set -# CONFIG_SIBYTE_CRHINE is not set -# CONFIG_SIBYTE_CRHONE is not set +# CONFIG_SIBYTE_BIGSUR is not set # CONFIG_SNI_RM is not set # CONFIG_TOSHIBA_JMR3927 is not set # CONFIG_TOSHIBA_RBTX4927 is not set # CONFIG_TOSHIBA_RBTX4938 is not set +# CONFIG_WR_PPMC is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_ARCH_HAS_ILOG2_U32 is not set # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CMOS_UPDATE=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y # CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set +CONFIG_BOOT_RAW=y +CONFIG_CEVT_R4K=y CONFIG_DMA_NONCOHERENT=y CONFIG_DMA_NEED_PCI_MAP_STATE=y +CONFIG_EARLY_PRINTK=y +CONFIG_SYS_HAS_EARLY_PRINTK=y +# CONFIG_HOTPLUG_CPU is not set +# CONFIG_NO_IOPORT is not set # CONFIG_CPU_BIG_ENDIAN is not set CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y @@ -76,6 +73,11 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5 # # CPU selection # +# CONFIG_TICK_ONESHOT is not set +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +# 
CONFIG_CPU_LOONGSON2 is not set CONFIG_CPU_MIPS32_R1=y # CONFIG_CPU_MIPS32_R2 is not set # CONFIG_CPU_MIPS64_R1 is not set @@ -115,8 +117,8 @@ CONFIG_CPU_HAS_PREFETCH=y CONFIG_MIPS_MT_DISABLED=y # CONFIG_MIPS_MT_SMP is not set # CONFIG_MIPS_MT_SMTC is not set +CONFIG_SYS_SUPPORTS_MULTITHREADING=y # CONFIG_MIPS_VPE_LOADER is not set -# CONFIG_64BIT_PHYS_ADDR is not set CONFIG_CPU_HAS_LLSC=y CONFIG_CPU_HAS_SYNC=y CONFIG_GENERIC_HARDIRQS=y @@ -130,50 +132,52 @@ CONFIG_FLATMEM_MANUAL=y CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y # CONFIG_SPARSEMEM_STATIC is not set +# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set CONFIG_SPLIT_PTLOCK_CPUS=4 # CONFIG_RESOURCES_64BIT is not set -CONFIG_ZONE_DMA_FLAG=1 +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y # CONFIG_HZ_48 is not set -# CONFIG_HZ_100 is not set +CONFIG_HZ_100=y # CONFIG_HZ_128 is not set # CONFIG_HZ_250 is not set # CONFIG_HZ_256 is not set -CONFIG_HZ_1000=y +# CONFIG_HZ_1000 is not set # CONFIG_HZ_1024 is not set CONFIG_SYS_SUPPORTS_ARBIT_HZ=y -CONFIG_HZ=1000 +CONFIG_HZ=100 CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set # CONFIG_KEXEC is not set +# CONFIG_SECCOMP is not set CONFIG_LOCKDEP_SUPPORT=y CONFIG_STACKTRACE_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" # -# Code maturity level options +# General setup # CONFIG_EXPERIMENTAL=y CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 - -# -# General setup -# CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y -CONFIG_SWAP=y +# CONFIG_SWAP is not set CONFIG_SYSVIPC=y -# CONFIG_IPC_NS is not set CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set -# CONFIG_UTS_NS is not set +# CONFIG_USER_NS is not set # CONFIG_AUDIT is not set # CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FAIR_USER_SCHED=y CONFIG_SYSFS_DEPRECATED=y # CONFIG_RELAY is not set +# CONFIG_BLK_DEV_INITRD is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_EMBEDDED=y @@ -187,31 +191,29 @@ CONFIG_BUG=y CONFIG_ELF_CORE=y CONFIG_BASE_FULL=y CONFIG_FUTEX=y +CONFIG_ANON_INODES=y CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_EVENTFD=y CONFIG_SHMEM=y -CONFIG_SLAB=y CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set CONFIG_RT_MUTEXES=y # CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 -# CONFIG_SLOB is not set - -# -# Loadable module support -# CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_KMOD=y - -# -# Block layer -# CONFIG_BLOCK=y # CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_LSF is not set +# CONFIG_BLK_DEV_BSG is not set # # IO Schedulers @@ -229,18 +231,11 @@ CONFIG_DEFAULT_IOSCHED="anticipatory" # # Bus options (PCI, PCMCIA, EISA, ISA, TC) # +# CONFIG_ARCH_SUPPORTS_MSI is not set CONFIG_MMU=y - -# -# PCCARD (PCMCIA/CardBus) support -# # CONFIG_PCCARD is not set # -# PCI Hotplug Support -# - -# # Executable file formats # CONFIG_BINFMT_ELF=y @@ -250,9 +245,8 @@ CONFIG_TRAD_SIGNALS=y # # Power management options # -CONFIG_PM=y -# CONFIG_PM_LEGACY is not set -# CONFIG_PM_DEBUG is not set +# CONFIG_PM is not set +CONFIG_SUSPEND_UP_POSSIBLE=y # # Networking @@ -262,75 +256,50 @@ CONFIG_NET=y # # Networking options # -# CONFIG_NETDEBUG is not set CONFIG_PACKET=y CONFIG_PACKET_MMAP=y CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set 
-CONFIG_XFRM_MIGRATE=y -CONFIG_NET_KEY=y -CONFIG_NET_KEY_MIGRATE=y +# CONFIG_NET_KEY is not set CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_ASK_IP_FIB_HASH=y # CONFIG_IP_FIB_TRIE is not set CONFIG_IP_FIB_HASH=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set -CONFIG_IP_ROUTE_VERBOSE=y +# CONFIG_IP_MULTIPLE_TABLES is not set +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y # CONFIG_IP_PNP_RARP is not set # CONFIG_NET_IPIP is not set # CONFIG_NET_IPGRE is not set -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y +# CONFIG_IP_MROUTE is not set # CONFIG_ARPD is not set -CONFIG_SYN_COOKIES=y +# CONFIG_SYN_COOKIES is not set # CONFIG_INET_AH is not set # CONFIG_INET_ESP is not set # CONFIG_INET_IPCOMP is not set # CONFIG_INET_XFRM_TUNNEL is not set # CONFIG_INET_TUNNEL is not set -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=y CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y +# CONFIG_TCP_MD5SIG is not set # CONFIG_IPV6 is not set # CONFIG_INET6_XFRM_TUNNEL is not set # CONFIG_INET6_TUNNEL is not set -CONFIG_NETWORK_SECMARK=y +# CONFIG_NETWORK_SECMARK is not set # CONFIG_NETFILTER is not set - -# -# DCCP Configuration (EXPERIMENTAL) -# # CONFIG_IP_DCCP is not set - -# -# SCTP Configuration (EXPERIMENTAL) -# -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_MSG is not set -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_HMAC_NONE is not set -# CONFIG_SCTP_HMAC_SHA1 is not set -CONFIG_SCTP_HMAC_MD5=y - -# -# TIPC Configuration (EXPERIMENTAL) -# +# CONFIG_IP_SCTP is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -347,44 +316,7 @@ CONFIG_SCTP_HMAC_MD5=y # # QoS and/or fair queueing # -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_FIFO=y -CONFIG_NET_SCH_CLK_JIFFIES=y -# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set -# CONFIG_NET_SCH_CLK_CPU is not set - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_INGRESS=m - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_ROUTE=y -# CONFIG_NET_CLS_FW is not set -# CONFIG_NET_CLS_U32 is not set -# CONFIG_NET_CLS_RSVP is not set -# CONFIG_NET_CLS_RSVP6 is not set -# CONFIG_NET_EMATCH is not set -# CONFIG_NET_CLS_ACT is not set -# CONFIG_NET_CLS_POLICE is not set -CONFIG_NET_ESTIMATOR=y +# CONFIG_NET_SCHED is not set # # Network testing @@ -393,8 +325,17 @@ CONFIG_NET_ESTIMATOR=y # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set + +# +# Wireless +# +# CONFIG_CFG80211 is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_MAC80211 is not set # CONFIG_IEEE80211 is not set -CONFIG_FIB_RULES=y +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set # # Device Drivers @@ -403,52 +344,25 @@ CONFIG_FIB_RULES=y # # Generic Driver Options # +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 
# CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set # CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DEVRES is not set # CONFIG_SYS_HYPERVISOR is not set - -# -# Connector - unified userspace <-> kernelspace linker -# # CONFIG_CONNECTOR is not set - -# -# Memory Technology Devices (MTD) -# # CONFIG_MTD is not set - -# -# Parallel port support -# # CONFIG_PARPORT is not set - -# -# Plug and Play support -# -# CONFIG_PNPACPI is not set - -# -# Block devices -# +CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set CONFIG_BLK_DEV_LOOP=y # CONFIG_BLK_DEV_CRYPTOLOOP is not set CONFIG_BLK_DEV_NBD=y # CONFIG_BLK_DEV_RAM is not set -# CONFIG_BLK_DEV_INITRD is not set # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set - -# -# Misc devices -# - -# -# ATA/ATAPI/MFM/RLL support -# +# CONFIG_MISC_DEVICES is not set # CONFIG_IDE is not set # @@ -456,48 +370,29 @@ CONFIG_BLK_DEV_NBD=y # # CONFIG_RAID_ATTRS is not set # CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set # CONFIG_SCSI_NETLINK is not set - -# -# Serial ATA (prod) and Parallel ATA (experimental) drivers -# # CONFIG_ATA is not set - -# -# Multi-device support (RAID and LVM) -# # CONFIG_MD is not set - -# -# Fusion MPT device support -# -# CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# - -# -# I2O device support -# - -# -# Network device support -# CONFIG_NETDEVICES=y +# CONFIG_NETDEVICES_MULTIQUEUE is not set # CONFIG_DUMMY is not set # CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set +# CONFIG_VETH is not set # CONFIG_PHYLIB is not set - -# -# Ethernet (10 or 100Mbit) -# CONFIG_NET_ETHERNET=y # CONFIG_MII is not set +# CONFIG_AX88796 is not set CONFIG_MIPS_SIM_NET=y # CONFIG_DM9000 is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_B44 is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set @@ -513,49 +408,18 @@ CONFIG_MIPS_SIM_NET=y # CONFIG_NETCONSOLE is not set # CONFIG_NETPOLL is not set # CONFIG_NET_POLL_CONTROLLER is not set - -# -# ISDN subsystem -# # CONFIG_ISDN is not set - -# -# Telephony Support -# # CONFIG_PHONE is not set # # Input device support # -CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set - -# -# Userland interfaces -# -# CONFIG_INPUT_MOUSEDEV is not set -# CONFIG_INPUT_JOYDEV is not set -# CONFIG_INPUT_TSDEV is not set -# CONFIG_INPUT_EVDEV is not set -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set +# CONFIG_INPUT is not set # # Hardware I/O ports # -CONFIG_SERIO=y -# CONFIG_SERIO_I8042 is not set -CONFIG_SERIO_SERPORT=y -# CONFIG_SERIO_LIBPS2 is not set -# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO is not set # CONFIG_GAMEPORT is not set # @@ -581,31 +445,13 @@ CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_UNIX98_PTYS=y CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 - -# -# IPMI -# # CONFIG_IPMI_HANDLER is not set - -# -# Watchdog Cards -# # CONFIG_WATCHDOG is not set # CONFIG_HW_RANDOM is not set # CONFIG_RTC is not set -# CONFIG_GEN_RTC is not set -# CONFIG_DTLK is not set # CONFIG_R3964 is not set # CONFIG_RAW_DRIVER is not set - -# -# TPM devices -# # CONFIG_TCG_TPM is not set - -# -# I2C support -# # CONFIG_I2C is not set # @@ -613,118 
+459,60 @@ CONFIG_LEGACY_PTY_COUNT=256 # # CONFIG_SPI is not set # CONFIG_SPI_MASTER is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set # -# Dallas's 1-wire bus +# Sonics Silicon Backplane # -# CONFIG_W1 is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set # -# Hardware Monitoring support +# Multifunction device drivers # -# CONFIG_HWMON is not set -# CONFIG_HWMON_VID is not set +# CONFIG_MFD_SM501 is not set # # Multimedia devices # # CONFIG_VIDEO_DEV is not set - -# -# Digital Video Broadcasting Devices -# -# CONFIG_DVB is not set +# CONFIG_DVB_CORE is not set +# CONFIG_DAB is not set # # Graphics support # -# CONFIG_FIRMWARE_EDID is not set +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set # CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set # -# Sound +# Display device support # -# CONFIG_SOUND is not set +# CONFIG_DISPLAY_SUPPORT is not set # -# HID Devices -# -# CONFIG_HID is not set - -# -# USB support -# -# CONFIG_USB_ARCH_HAS_HCD is not set -# CONFIG_USB_ARCH_HAS_OHCI is not set -# CONFIG_USB_ARCH_HAS_EHCI is not set - -# -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' -# - -# -# USB Gadget Support -# -# CONFIG_USB_GADGET is not set - -# -# MMC/SD Card support +# Sound # +# CONFIG_SOUND is not set +# CONFIG_USB_SUPPORT is not set # CONFIG_MMC is not set - -# -# LED devices -# # CONFIG_NEW_LEDS is not set - -# -# LED drivers -# - -# -# LED Triggers -# - -# -# InfiniBand support -# - -# -# EDAC - error detection and reporting (RAS) (EXPERIMENTAL) -# - -# -# Real Time Clock -# +CONFIG_RTC_LIB=y # CONFIG_RTC_CLASS is not set # -# DMA Engine support -# -# CONFIG_DMA_ENGINE is not set - -# -# DMA Clients -# - -# -# DMA Devices -# - -# -# Auxiliary Display support -# - -# -# Virtualization +# Userspace I/O # +# CONFIG_UIO is not set # # File systems # -CONFIG_EXT2_FS=y -# CONFIG_EXT2_FS_XATTR is not set -# CONFIG_EXT2_FS_XIP is not set +# CONFIG_EXT2_FS is not set # CONFIG_EXT3_FS is not set # CONFIG_EXT4DEV_FS is not set # CONFIG_REISERFS_FS is not set @@ -732,6 +520,7 @@ CONFIG_EXT2_FS=y # CONFIG_FS_POSIX_ACL is not set # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set # CONFIG_MINIX_FS is not set CONFIG_ROMFS_FS=y # CONFIG_INOTIFY is not set @@ -760,10 +549,11 @@ CONFIG_ROMFS_FS=y CONFIG_PROC_FS=y # CONFIG_PROC_KCORE is not set CONFIG_PROC_SYSCTL=y -# CONFIG_SYSFS is not set -# CONFIG_TMPFS is not set +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set -CONFIG_RAMFS=y +# CONFIG_CONFIGFS_FS is not set # # Miscellaneous filesystems @@ -781,10 +571,7 @@ CONFIG_RAMFS=y # CONFIG_QNX4FS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set - -# -# Network File Systems -# +CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y # CONFIG_NFS_V3_ACL is not set @@ -796,6 +583,7 @@ CONFIG_LOCKD=y CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y +# CONFIG_SUNRPC_BIND34 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -803,22 +591,14 @@ CONFIG_SUNRPC=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set -# CONFIG_9P_FS is not set # # Partition Types # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y - -# -# Native Language Support -# # CONFIG_NLS is not set - -# -# Distributed Lock Manager -# +# CONFIG_DLM is not set # # Profiling support @@ -833,20 +613,22 @@ 
CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_ENABLE_MUST_CHECK=y # CONFIG_MAGIC_SYSRQ is not set # CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set -CONFIG_LOG_BUF_SHIFT=14 # CONFIG_DETECT_SOFTLOCKUP is not set +# CONFIG_SCHED_DEBUG is not set # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_RT_MUTEXES is not set # CONFIG_RT_MUTEX_TESTER is not set # CONFIG_DEBUG_SPINLOCK is not set -CONFIG_DEBUG_MUTEXES=y +# CONFIG_DEBUG_MUTEXES is not set # CONFIG_DEBUG_LOCK_ALLOC is not set # CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set @@ -854,7 +636,9 @@ CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_LIST is not set CONFIG_FORCED_INLINING=y +# CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_FAULT_INJECTION is not set CONFIG_CROSSCOMPILE=y CONFIG_CMDLINE="nfsroot=192.168.192.169:/u1/mipsel,timeo=20 ip=dhcp" # CONFIG_DEBUG_STACK_USAGE is not set @@ -865,60 +649,20 @@ CONFIG_CMDLINE="nfsroot=192.168.192.169:/u1/mipsel,timeo=20 ip=dhcp" # Security options # # CONFIG_KEYS is not set - -# -# Cryptographic options -# -CONFIG_CRYPTO=y -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_BLKCIPHER=m -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -# CONFIG_CRYPTO_NULL is not set -# CONFIG_CRYPTO_MD4 is not set -CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_SHA1 is not set -# CONFIG_CRYPTO_SHA256 is not set -# CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_WP512 is not set -# CONFIG_CRYPTO_TGR192 is not set -CONFIG_CRYPTO_GF128MUL=m -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_CBC=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_LRW=m -# CONFIG_CRYPTO_DES is not set -CONFIG_CRYPTO_FCRYPT=m -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_TWOFISH is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_AES is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_ARC4 is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_CRC32C is not set -CONFIG_CRYPTO_CAMELLIA=m -# CONFIG_CRYPTO_TEST is not set - -# -# Hardware crypto devices -# +# CONFIG_SECURITY is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_CRYPTO is not set # # Library routines # -CONFIG_BITREVERSE=y # CONFIG_CRC_CCITT is not set -CONFIG_CRC16=y -CONFIG_CRC32=y +# CONFIG_CRC16 is not set +# CONFIG_CRC_ITU_T is not set +# CONFIG_CRC32 is not set +# CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y diff --git a/arch/mips/configs/sb1250-swarm_defconfig b/arch/mips/configs/sb1250-swarm_defconfig index 3ed991a..49dfcef 100644 --- a/arch/mips/configs/sb1250-swarm_defconfig +++ b/arch/mips/configs/sb1250-swarm_defconfig @@ -196,6 +196,7 @@ CONFIG_SYSVIPC_SYSCTL=y # CONFIG_UTS_NS is not set # CONFIG_AUDIT is not set # CONFIG_IKCONFIG is not set +CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_SYSFS_DEPRECATED=y CONFIG_RELAY=y diff --git a/arch/mips/emma2rh/markeins/setup.c b/arch/mips/emma2rh/markeins/setup.c index 5e1da53..82f9e901 100644 --- a/arch/mips/emma2rh/markeins/setup.c +++ b/arch/mips/emma2rh/markeins/setup.c @@ -104,12 
+104,6 @@ void __init plat_time_init(void) mips_hpt_frequency = (bus_frequency * (4 + reg)) / 4 / 2; } -void __init plat_timer_setup(struct irqaction *irq) -{ - /* we are using the cpu counter for timer interrupts */ - setup_irq(CPU_IRQ_BASE + 7, irq); -} - static void markeins_board_init(void); extern void markeins_irq_setup(void); diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c index 835b056..ae25b48 100644 --- a/arch/mips/jazz/irq.c +++ b/arch/mips/jazz/irq.c @@ -4,7 +4,7 @@ * for more details. * * Copyright (C) 1992 Linus Torvalds - * Copyright (C) 1994 - 2001, 2003 Ralf Baechle + * Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle */ #include <linux/clockchips.h> #include <linux/init.h> @@ -13,6 +13,7 @@ #include <linux/spinlock.h> #include <asm/irq_cpu.h> +#include <asm/i8253.h> #include <asm/i8259.h> #include <asm/io.h> #include <asm/jazz.h> @@ -136,7 +137,7 @@ static struct irqaction r4030_timer_irqaction = { .name = "timer", }; -void __init plat_timer_setup(struct irqaction *ignored) +void __init plat_time_init(void) { struct irqaction *irq = &r4030_timer_irqaction; @@ -152,4 +153,5 @@ void __init plat_timer_setup(struct irqaction *ignored) setup_irq(JAZZ_TIMER_IRQ, irq); clockevents_register_device(&r4030_clockevent); + setup_pit_timer(); } diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c index cfc7dce..a785797 100644 --- a/arch/mips/jazz/setup.c +++ b/arch/mips/jazz/setup.c @@ -5,7 +5,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1996, 1997, 1998, 2001 by Ralf Baechle + * Copyright (C) 1996, 1997, 1998, 2001, 07 by Ralf Baechle * Copyright (C) 2001 MIPS Technologies, Inc. * Copyright (C) 2007 by Thomas Bogendoerfer */ @@ -25,7 +25,6 @@ #include <linux/serial_8250.h> #include <asm/bootinfo.h> -#include <asm/i8253.h> #include <asm/irq.h> #include <asm/jazz.h> #include <asm/jazzdma.h> @@ -64,11 +63,6 @@ static struct resource jazz_io_resources[] = { } }; -void __init plat_time_init(void) -{ - setup_pit_timer(); -} - void __init plat_mem_setup(void) { int i; diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c index 0c7aee1..edb9e59 100644 --- a/arch/mips/jmr3927/rbhma3100/setup.c +++ b/arch/mips/jmr3927/rbhma3100/setup.c @@ -1,15 +1,4 @@ -/*********************************************************************** - * - * Copyright 2001 MontaVista Software Inc. - * Author: MontaVista Software, Inc. - * ahennessy@mvista.com - * - * Based on arch/mips/ddb5xxx/ddb5477/setup.c - * - * Setup file for JMR3927. - * - * Copyright (C) 2000-2001 Toshiba Corporation - * +/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your @@ -30,9 +19,15 @@ * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * - *********************************************************************** + * Copyright 2001 MontaVista Software Inc. + * Author: MontaVista Software, Inc. 
+ * ahennessy@mvista.com + * + * Copyright (C) 2000-2001 Toshiba Corporation + * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) */ +#include <linux/clockchips.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kdev_t.h> @@ -104,27 +99,60 @@ static cycle_t jmr3927_hpt_read(void) return jiffies * (JMR3927_TIMER_CLK / HZ) + jmr3927_tmrptr->trr; } -static void jmr3927_timer_ack(void) +static void jmr3927_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + /* Nothing to do here */ +} + +struct clock_event_device jmr3927_clock_event_device = { + .name = "MIPS", + .features = CLOCK_EVT_FEAT_PERIODIC, + .shift = 32, + .rating = 300, + .cpumask = CPU_MASK_CPU0, + .irq = JMR3927_IRQ_TICK, + .set_mode = jmr3927_set_mode, +}; + +static irqreturn_t jmr3927_timer_interrupt(int irq, void *dev_id) { + struct clock_event_device *cd = &jmr3927_clock_event_device; + jmr3927_tmrptr->tisr = 0; /* ack interrupt */ + + cd->event_handler(cd); + + return IRQ_HANDLED; } +static struct irqaction jmr3927_timer_irqaction = { + .handler = jmr3927_timer_interrupt, + .flags = IRQF_DISABLED | IRQF_PERCPU, + .name = "jmr3927-timer", +}; + void __init plat_time_init(void) { + struct clock_event_device *cd; + clocksource_mips.read = jmr3927_hpt_read; - mips_timer_ack = jmr3927_timer_ack; mips_hpt_frequency = JMR3927_TIMER_CLK; -} -void __init plat_timer_setup(struct irqaction *irq) -{ jmr3927_tmrptr->cpra = JMR3927_TIMER_CLK / HZ; jmr3927_tmrptr->itmr = TXx927_TMTITMR_TIIE | TXx927_TMTITMR_TZCE; jmr3927_tmrptr->ccdr = JMR3927_TIMER_CCD; jmr3927_tmrptr->tcr = TXx927_TMTCR_TCE | TXx927_TMTCR_CCDE | TXx927_TMTCR_TMODE_ITVL; - setup_irq(JMR3927_IRQ_TICK, irq); + cd = &jmr3927_clock_event_device; + /* Calculate the min / max delta */ + cd->mult = div_sc((unsigned long) JMR3927_IMCLK, NSEC_PER_SEC, 32); + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + clockevents_register_device(cd); + + setup_irq(JMR3927_IRQ_TICK, &jmr3927_timer_irqaction); } #define DO_WRITE_THROUGH diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 95a356e..a3afa39 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -8,6 +8,8 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \ time.o topology.o traps.o unaligned.o +obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o + binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \ irix5sys.o sysirix.o diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c new file mode 100644 index 0000000..a915e56 --- /dev/null +++ b/arch/mips/kernel/cevt-r4k.c @@ -0,0 +1,273 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 MIPS Technologies, Inc. + * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> + */ +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/percpu.h> + +#include <asm/smtc_ipi.h> +#include <asm/time.h> + +static int mips_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + unsigned int cnt; + int res; + +#ifdef CONFIG_MIPS_MT_SMTC + { + unsigned long flags, vpflags; + local_irq_save(flags); + vpflags = dvpe(); +#endif + cnt = read_c0_count(); + cnt += delta; + write_c0_compare(cnt); + res = ((long)(read_c0_count() - cnt ) > 0) ? 
-ETIME : 0; +#ifdef CONFIG_MIPS_MT_SMTC + evpe(vpflags); + local_irq_restore(flags); + } +#endif + return res; +} + +static void mips_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + /* Nothing to do ... */ +} + +static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); +static int cp0_timer_irq_installed; + +/* + * Timer ack for an R4k-compatible timer of a known frequency. + */ +static void c0_timer_ack(void) +{ + write_c0_compare(read_c0_compare()); +} + +/* + * Possibly handle a performance counter interrupt. + * Return true if the timer interrupt should not be checked + */ +static inline int handle_perf_irq(int r2) +{ + /* + * The performance counter overflow interrupt may be shared with the + * timer interrupt (cp0_perfcount_irq < 0). If it is and a + * performance counter has overflowed (perf_irq() == IRQ_HANDLED) + * and we can't reliably determine if a counter interrupt has also + * happened (!r2) then don't check for a timer interrupt. + */ + return (cp0_perfcount_irq < 0) && + perf_irq() == IRQ_HANDLED && + !r2; +} + +static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +{ + const int r2 = cpu_has_mips_r2; + struct clock_event_device *cd; + int cpu = smp_processor_id(); + + /* + * Suckage alert: + * Before R2 of the architecture there was no way to see if a + * performance counter interrupt was pending, so we have to run + * the performance counter interrupt handler anyway. + */ + if (handle_perf_irq(r2)) + goto out; + + /* + * The same applies to performance counter interrupts. But with the + * above we now know that the reason we got here must be a timer + * interrupt. Being the paranoiacs we are we check anyway. + */ + if (!r2 || (read_c0_cause() & (1 << 30))) { + c0_timer_ack(); +#ifdef CONFIG_MIPS_MT_SMTC + if (cpu_data[cpu].vpe_id) + goto out; + cpu = 0; +#endif + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); + } + +out: + return IRQ_HANDLED; +} + +static struct irqaction c0_compare_irqaction = { + .handler = c0_compare_interrupt, +#ifdef CONFIG_MIPS_MT_SMTC + .flags = IRQF_DISABLED, +#else + .flags = IRQF_DISABLED | IRQF_PERCPU, +#endif + .name = "timer", +}; + +#ifdef CONFIG_MIPS_MT_SMTC +DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); + +static void smtc_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ +} + +static void mips_broadcast(cpumask_t mask) +{ + unsigned int cpu; + + for_each_cpu_mask(cpu, mask) + smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); +} + +static void setup_smtc_dummy_clockevent_device(void) +{ + //uint64_t mips_freq = mips_hpt_^frequency; + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + + cd = &per_cpu(smtc_dummy_clockevent_device, cpu); + + cd->name = "SMTC"; + cd->features = CLOCK_EVT_FEAT_DUMMY; + + /* Calculate the min / max delta */ + cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); + cd->shift = 0; //32; + cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); + + cd->rating = 200; + cd->irq = 17; //-1; +// if (cpu) +// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); +// else + cd->cpumask = cpumask_of_cpu(cpu); + + cd->set_mode = smtc_set_mode; + + cd->broadcast = mips_broadcast; + + clockevents_register_device(cd); +} +#endif + +static void mips_event_handler(struct clock_event_device *dev) +{ +} + +/* + * FIXME: This doesn't hold for the relocated E9000 compare interrupt. 
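
[Editor's note] For readers unfamiliar with the MIPS count/compare pair, the sketch below is ordinary user-space C with made-up values, not kernel code; it mimics what mips_next_event() earlier in cevt-r4k.c does: arm Compare at Count + delta, then report -ETIME if the counter has already raced past the new deadline so the clockevent core can retry with a later expiry.

	/* Standalone sketch (user space, not kernel code) of the count/compare
	 * one-shot programming pattern in mips_next_event() above.  The CP0
	 * Count register is simulated by a plain variable here. */
	#include <stdio.h>
	#include <stdint.h>
	#include <errno.h>

	static uint32_t count;		/* simulated CP0 Count */
	static uint32_t compare;	/* simulated CP0 Compare */

	static int next_event(unsigned long delta)
	{
		uint32_t cnt;

		cnt = count;
		cnt += delta;
		compare = cnt;		/* arm the comparator */
		count += 5;		/* pretend time passed while programming */

		/* If Count already moved past Compare, the interrupt was lost,
		 * so report -ETIME and let the caller pick a later deadline. */
		return ((int32_t)(count - cnt) > 0) ? -ETIME : 0;
	}

	int main(void)
	{
		printf("delta=1000 -> %d\n", next_event(1000));	/* expect 0 */
		printf("delta=2    -> %d\n", next_event(2));	/* expect -ETIME */
		return 0;
	}
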
+ */ +static int c0_compare_int_pending(void) +{ + return (read_c0_cause() >> cp0_compare_irq) & 0x100; +} + +static int c0_compare_int_usable(void) +{ + const unsigned int delta = 0x300000; + unsigned int cnt; + + /* + * IP7 already pending? Try to clear it by acking the timer. + */ + if (c0_compare_int_pending()) { + write_c0_compare(read_c0_compare()); + irq_disable_hazard(); + if (c0_compare_int_pending()) + return 0; + } + + cnt = read_c0_count(); + cnt += delta; + write_c0_compare(cnt); + + while ((long)(read_c0_count() - cnt) <= 0) + ; /* Wait for expiry */ + + if (!c0_compare_int_pending()) + return 0; + + write_c0_compare(read_c0_compare()); + irq_disable_hazard(); + if (c0_compare_int_pending()) + return 0; + + /* + * Feels like a real count / compare timer. + */ + return 1; +} + +void __cpuinit mips_clockevent_init(void) +{ + uint64_t mips_freq = mips_hpt_frequency; + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + unsigned int irq = MIPS_CPU_IRQ_BASE + 7; + + if (!cpu_has_counter) + return; + +#ifdef CONFIG_MIPS_MT_SMTC + setup_smtc_dummy_clockevent_device(); + + /* + * On SMTC we only register VPE0's compare interrupt as clockevent + * device. + */ + if (cpu) + return; +#endif + + if (!c0_compare_int_usable()) + return; + + cd = &per_cpu(mips_clockevent_device, cpu); + + cd->name = "MIPS"; + cd->features = CLOCK_EVT_FEAT_ONESHOT; + + /* Calculate the min / max delta */ + cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); + cd->shift = 32; + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + + cd->rating = 300; + cd->irq = irq; +#ifdef CONFIG_MIPS_MT_SMTC + cd->cpumask = CPU_MASK_ALL; +#else + cd->cpumask = cpumask_of_cpu(cpu); +#endif + cd->set_next_event = mips_next_event; + cd->set_mode = mips_set_mode; + cd->event_handler = mips_event_handler; + + clockevents_register_device(cd); + + if (!cp0_timer_irq_installed) { +#ifdef CONFIG_MIPS_MT_SMTC +#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) + setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT); +#else + setup_irq(irq, &c0_compare_irqaction); +#endif /* CONFIG_MIPS_MT_SMTC */ + cp0_timer_irq_installed = 1; + } +} diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index bf164a5..2367687 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -27,16 +27,6 @@ #include <kernel-entry-init.h> - .macro ARC64_TWIDDLE_PC -#if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL) - /* We get launched at a XKPHYS address but the kernel is linked to - run at a KSEG0 address, so jump there. */ - PTR_LA t0, \@f - jr t0 -\@: -#endif - .endm - /* * inputs are the text nasid in t1, data nasid in t2. */ @@ -157,7 +147,11 @@ NESTED(kernel_entry, 16, sp) # kernel entry point setup_c0_status_pri - ARC64_TWIDDLE_PC + /* We might not get launched at the address the kernel is linked to, + so we jump there. 
*/ + PTR_LA t0, 0f + jr t0 +0: #ifdef CONFIG_MIPS_MT_SMTC /* diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c index b997af7..7852c7c 100644 --- a/arch/mips/kernel/irixelf.c +++ b/arch/mips/kernel/irixelf.c @@ -1172,8 +1172,8 @@ static int irix_core_dump(long signr, struct pt_regs *regs, struct file *file, u prstatus.pr_sighold = current->blocked.sig[0]; psinfo.pr_pid = prstatus.pr_pid = current->pid; psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid; - psinfo.pr_pgrp = prstatus.pr_pgrp = process_group(current); - psinfo.pr_sid = prstatus.pr_sid = process_session(current); + psinfo.pr_pgrp = prstatus.pr_pgrp = task_pgrp_nr(current); + psinfo.pr_sid = prstatus.pr_sid = task_session_nr(current); if (current->pid == current->tgid) { /* * This is the record for the group leader. Add in the diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c index 85c2e38..a0a9105 100644 --- a/arch/mips/kernel/irixsig.c +++ b/arch/mips/kernel/irixsig.c @@ -609,7 +609,7 @@ repeat: p = list_entry(_p, struct task_struct, sibling); if ((type == IRIX_P_PID) && p->pid != pid) continue; - if ((type == IRIX_P_PGID) && process_group(p) != pid) + if ((type == IRIX_P_PGID) && task_pgrp_nr(p) != pid) continue; if ((p->exit_signal != SIGCHLD)) continue; diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index cb08014..e7ed0ac 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -381,7 +381,7 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr) return e; } -/* Put in dbe list if neccessary. */ +/* Put in dbe list if necessary. */ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c index ee7790d..4c477c7 100644 --- a/arch/mips/kernel/sysirix.c +++ b/arch/mips/kernel/sysirix.c @@ -763,11 +763,11 @@ asmlinkage int irix_setpgrp(int flags) printk("[%s:%d] setpgrp(%d) ", current->comm, current->pid, flags); #endif if(!flags) - error = process_group(current); + error = task_pgrp_nr(current); else error = sys_setsid(); #ifdef DEBUG_PROCGRPS - printk("returning %d\n", process_group(current)); + printk("returning %d\n", task_pgrp_nr(current)); #endif return error; diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index e4b5e64..c4e6866 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -40,17 +40,6 @@ #include <irq.h> /* - * The integer part of the number of usecs per jiffy is taken from tick, - * but the fractional part is not recorded, so we calculate it using the - * initial value of HZ. This aids systems where tick isn't really an - * integer (e.g. for HZ = 128). - */ -#define USECS_PER_JIFFY TICK_SIZE -#define USECS_PER_JIFFY_FRAC ((unsigned long)(u32)((1000000ULL << 32) / HZ)) - -#define TICK_SIZE (tick_nsec / 1000) - -/* * forward reference */ DEFINE_SPINLOCK(rtc_lock); @@ -72,14 +61,6 @@ int update_persistent_clock(struct timespec now) return rtc_mips_set_mmss(now.tv_sec); } -/* how many counter cycles in a jiffy */ -static unsigned long cycles_per_jiffy __read_mostly; - -/* - * Null timer ack for systems not needing one (e.g. i8254). - */ -static void null_timer_ack(void) { /* nothing */ } - /* * Null high precision timer functions for systems lacking one. */ @@ -89,14 +70,6 @@ static cycle_t null_hpt_read(void) } /* - * Timer ack for an R4k-compatible timer of a known frequency. 
- */ -static void c0_timer_ack(void) -{ - write_c0_compare(read_c0_compare()); -} - -/* * High precision timer functions for a R4k-compatible timer. */ static cycle_t c0_hpt_read(void) @@ -105,7 +78,6 @@ static cycle_t c0_hpt_read(void) } int (*mips_timer_state)(void); -void (*mips_timer_ack)(void); /* * local_timer_interrupt() does profiling and process accounting @@ -135,35 +107,6 @@ int (*perf_irq)(void) = null_perf_irq; EXPORT_SYMBOL(perf_irq); /* - * Timer interrupt - */ -int cp0_compare_irq; - -/* - * Performance counter IRQ or -1 if shared with timer - */ -int cp0_perfcount_irq; -EXPORT_SYMBOL_GPL(cp0_perfcount_irq); - -/* - * Possibly handle a performance counter interrupt. - * Return true if the timer interrupt should not be checked - */ -static inline int handle_perf_irq(int r2) -{ - /* - * The performance counter overflow interrupt may be shared with the - * timer interrupt (cp0_perfcount_irq < 0). If it is and a - * performance counter has overflowed (perf_irq() == IRQ_HANDLED) - * and we can't reliably determine if a counter interrupt has also - * happened (!r2) then don't check for a timer interrupt. - */ - return (cp0_perfcount_irq < 0) && - perf_irq() == IRQ_HANDLED && - !r2; -} - -/* * time_init() - it does the following things. * * 1) plat_time_init() - @@ -228,270 +171,58 @@ struct clocksource clocksource_mips = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static int mips_next_event(unsigned long delta, - struct clock_event_device *evt) +void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock) { - unsigned int cnt; - int res; - -#ifdef CONFIG_MIPS_MT_SMTC - { - unsigned long flags, vpflags; - local_irq_save(flags); - vpflags = dvpe(); -#endif - cnt = read_c0_count(); - cnt += delta; - write_c0_compare(cnt); - res = ((long)(read_c0_count() - cnt ) > 0) ? -ETIME : 0; -#ifdef CONFIG_MIPS_MT_SMTC - evpe(vpflags); - local_irq_restore(flags); - } -#endif - return res; -} - -static void mips_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - /* Nothing to do ... */ -} - -static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); -static int cp0_timer_irq_installed; - -static irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - const int r2 = cpu_has_mips_r2; - struct clock_event_device *cd; - int cpu = smp_processor_id(); - - /* - * Suckage alert: - * Before R2 of the architecture there was no way to see if a - * performance counter interrupt was pending, so we have to run - * the performance counter interrupt handler anyway. - */ - if (handle_perf_irq(r2)) - goto out; + u64 temp; + u32 shift; - /* - * The same applies to performance counter interrupts. But with the - * above we now know that the reason we got here must be a timer - * interrupt. Being the paranoiacs we are we check anyway. 
- */ - if (!r2 || (read_c0_cause() & (1 << 30))) { - c0_timer_ack(); -#ifdef CONFIG_MIPS_MT_SMTC - if (cpu_data[cpu].vpe_id) - goto out; - cpu = 0; -#endif - cd = &per_cpu(mips_clockevent_device, cpu); - cd->event_handler(cd); + /* Find a shift value */ + for (shift = 32; shift > 0; shift--) { + temp = (u64) NSEC_PER_SEC << shift; + do_div(temp, clock); + if ((temp >> 32) == 0) + break; } - -out: - return IRQ_HANDLED; + cs->shift = shift; + cs->mult = (u32) temp; } -static struct irqaction timer_irqaction = { - .handler = timer_interrupt, -#ifdef CONFIG_MIPS_MT_SMTC - .flags = IRQF_DISABLED, -#else - .flags = IRQF_DISABLED | IRQF_PERCPU, -#endif - .name = "timer", -}; - -static void __init init_mips_clocksource(void) +void __cpuinit clockevent_set_clock(struct clock_event_device *cd, + unsigned int clock) { u64 temp; u32 shift; - if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read) - return; - - /* Calclate a somewhat reasonable rating value */ - clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; /* Find a shift value */ for (shift = 32; shift > 0; shift--) { temp = (u64) NSEC_PER_SEC << shift; - do_div(temp, mips_hpt_frequency); + do_div(temp, clock); if ((temp >> 32) == 0) break; } - clocksource_mips.shift = shift; - clocksource_mips.mult = (u32)temp; - - clocksource_register(&clocksource_mips); -} - -void __init __weak plat_time_init(void) -{ + cd->shift = shift; + cd->mult = (u32) temp; } -void __init __weak plat_timer_setup(struct irqaction *irq) -{ -} - -#ifdef CONFIG_MIPS_MT_SMTC -DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); - -static void smtc_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ -} - -int dummycnt[NR_CPUS]; - -static void mips_broadcast(cpumask_t mask) -{ - unsigned int cpu; - - for_each_cpu_mask(cpu, mask) - smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); -} - -static void setup_smtc_dummy_clockevent_device(void) +static void __init init_mips_clocksource(void) { - //uint64_t mips_freq = mips_hpt_^frequency; - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - - cd = &per_cpu(smtc_dummy_clockevent_device, cpu); - - cd->name = "SMTC"; - cd->features = CLOCK_EVT_FEAT_DUMMY; - - /* Calculate the min / max delta */ - cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); - cd->shift = 0; //32; - cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); - - cd->rating = 200; - cd->irq = 17; //-1; -// if (cpu) -// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); -// else - cd->cpumask = cpumask_of_cpu(cpu); - - cd->set_mode = smtc_set_mode; - - cd->broadcast = mips_broadcast; + if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read) + return; - clockevents_register_device(cd); -} -#endif + /* Calclate a somewhat reasonable rating value */ + clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; -static void mips_event_handler(struct clock_event_device *dev) -{ -} + clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); -/* - * FIXME: This doesn't hold for the relocated E9000 compare interrupt. - */ -static int c0_compare_int_pending(void) -{ - return (read_c0_cause() >> cp0_compare_irq) & 0x100; + clocksource_register(&clocksource_mips); } -static int c0_compare_int_usable(void) +void __init __weak plat_time_init(void) { - const unsigned int delta = 0x300000; - unsigned int cnt; - - /* - * IP7 already pending? Try to clear it by acking the timer. 
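
[Editor's note] The clocksource_set_clock() helper added above picks a mult/shift pair so the clocksource core can convert counter cycles to nanoseconds with integer math. The user-space sketch below reproduces that search; 100 MHz is an arbitrary example frequency, not a value taken from any board in this patch.

	/* Standalone sketch (user space, not kernel code) of the shift/mult
	 * search performed by clocksource_set_clock() above. */
	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t clock = 100000000;	/* assumed counter frequency: 100 MHz */
		uint64_t temp = 0;
		uint32_t shift;

		/* Pick the largest shift for which
		 * mult = (NSEC_PER_SEC << shift) / clock still fits in 32 bits,
		 * keeping as much precision as possible. */
		for (shift = 32; shift > 0; shift--) {
			temp = (NSEC_PER_SEC << shift) / clock;
			if ((temp >> 32) == 0)
				break;
		}

		uint32_t mult = (uint32_t) temp;
		uint64_t cycles = 100;

		/* The core then converts cycles to ns as (cycles * mult) >> shift */
		printf("shift=%u mult=%u: %llu cycles -> %llu ns\n",
		       shift, mult, (unsigned long long)cycles,
		       (unsigned long long)((cycles * mult) >> shift));
		return 0;
	}
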
- */ - if (c0_compare_int_pending()) { - write_c0_compare(read_c0_compare()); - irq_disable_hazard(); - if (c0_compare_int_pending()) - return 0; - } - - cnt = read_c0_count(); - cnt += delta; - write_c0_compare(cnt); - - while ((long)(read_c0_count() - cnt) <= 0) - ; /* Wait for expiry */ - - if (!c0_compare_int_pending()) - return 0; - - write_c0_compare(read_c0_compare()); - irq_disable_hazard(); - if (c0_compare_int_pending()) - return 0; - - /* - * Feels like a real count / compare timer. - */ - return 1; } -void __cpuinit mips_clockevent_init(void) +void __init __weak plat_timer_setup(struct irqaction *irq) { - uint64_t mips_freq = mips_hpt_frequency; - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - unsigned int irq = MIPS_CPU_IRQ_BASE + 7; - - if (!cpu_has_counter) - return; - -#ifdef CONFIG_MIPS_MT_SMTC - setup_smtc_dummy_clockevent_device(); - - /* - * On SMTC we only register VPE0's compare interrupt as clockevent - * device. - */ - if (cpu) - return; -#endif - - if (!c0_compare_int_usable()) - return; - - cd = &per_cpu(mips_clockevent_device, cpu); - - cd->name = "MIPS"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; - - /* Calculate the min / max delta */ - cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); - cd->shift = 32; - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = clockevent_delta2ns(0x300, cd); - - cd->rating = 300; - cd->irq = irq; -#ifdef CONFIG_MIPS_MT_SMTC - cd->cpumask = CPU_MASK_ALL; -#else - cd->cpumask = cpumask_of_cpu(cpu); -#endif - cd->set_next_event = mips_next_event; - cd->set_mode = mips_set_mode; - cd->event_handler = mips_event_handler; - - clockevents_register_device(cd); - - if (!cp0_timer_irq_installed) { -#ifdef CONFIG_MIPS_MT_SMTC -#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) - setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT); -#else - setup_irq(irq, &timer_irqaction); -#endif /* CONFIG_MIPS_MT_SMTC */ - cp0_timer_irq_installed = 1; - } } void __init time_init(void) @@ -512,14 +243,6 @@ void __init time_init(void) if (!clocksource_mips.read) { /* No external high precision timer -- use R4k. */ clocksource_mips.read = c0_hpt_read; - - if (!mips_timer_state) { - /* No external timer interrupt -- use R4k. */ - mips_timer_ack = c0_timer_ack; - /* Calculate cache parameters. */ - cycles_per_jiffy = - (mips_hpt_frequency + HZ / 2) / HZ; - } } if (!mips_hpt_frequency) mips_hpt_frequency = calibrate_hpt(); @@ -528,29 +251,8 @@ void __init time_init(void) printk("Using %u.%03u MHz high precision timer.\n", ((mips_hpt_frequency + 500) / 1000) / 1000, ((mips_hpt_frequency + 500) / 1000) % 1000); - -#ifdef CONFIG_IRQ_CPU - setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction); -#endif } - if (!mips_timer_ack) - /* No timer interrupt ack (e.g. i8254). */ - mips_timer_ack = null_timer_ack; - - /* - * Call board specific timer interrupt setup. - * - * this pointer must be setup in machine setup routine. - * - * Even if a machine chooses to use a low-level timer interrupt, - * it still needs to setup the timer_irqaction. - * In that case, it might be better to set timer_irqaction.handler - * to be NULL function so that we are sure the high-level code - * is not invoked accidentally. 
- */ - plat_timer_setup(&timer_irqaction); - init_mips_clocksource(); mips_clockevent_init(); } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index bbf01b8..fa50078 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -314,7 +314,7 @@ void show_registers(const struct pt_regs *regs) __show_regs(regs); print_modules(); printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n", - current->comm, current->pid, current_thread_info(), current); + current->comm, task_pid_nr(current), current_thread_info(), current); show_stacktrace(current, regs); show_code((unsigned int __user *) regs->cp0_epc); printk("\n"); @@ -1336,6 +1336,17 @@ extern void cpu_cache_init(void); extern void tlb_init(void); extern void flush_tlb_handlers(void); +/* + * Timer interrupt + */ +int cp0_compare_irq; + +/* + * Performance counter IRQ or -1 if shared with timer + */ +int cp0_perfcount_irq; +EXPORT_SYMBOL_GPL(cp0_perfcount_irq); + void __init per_cpu_trap_init(void) { unsigned int cpu = smp_processor_id(); diff --git a/arch/mips/lemote/lm2e/setup.c b/arch/mips/lemote/lm2e/setup.c index 09314a2..2cc6745 100644 --- a/arch/mips/lemote/lm2e/setup.c +++ b/arch/mips/lemote/lm2e/setup.c @@ -53,11 +53,6 @@ unsigned long bus_clock; unsigned int memsize; unsigned int highmemsize = 0; -void __init plat_timer_setup(struct irqaction *irq) -{ - setup_irq(MIPS_CPU_IRQ_BASE + 7, irq); -} - void __init plat_time_init(void) { /* setup mips r4k timer */ diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 5699c77..fa636fc 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -173,7 +173,7 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(tsk)) { + if (is_global_init(tsk)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/mips/oprofile/Kconfig b/arch/mips/oprofile/Kconfig deleted file mode 100644 index fb6f235..0000000 --- a/arch/mips/oprofile/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ - -menu "Profiling support" - depends on EXPERIMENTAL - -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING && !MIPS_MT_SMTC && EXPERIMENTAL - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. 
- -endmenu - diff --git a/arch/mips/pci/pci-excite.c b/arch/mips/pci/pci-excite.c index 3c86c77..8a56876 100644 --- a/arch/mips/pci/pci-excite.c +++ b/arch/mips/pci/pci-excite.c @@ -131,7 +131,7 @@ static int __init basler_excite_pci_setup(void) ocd_writel(0x00000000, bar + 0x100); } - /* Finally, enable the PCI interupt */ + /* Finally, enable the PCI interrupt */ #if USB_IRQ > 7 set_c0_intcontrol(1 << USB_IRQ); #else diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig index abbd0bb..6b293ce 100644 --- a/arch/mips/pmc-sierra/Kconfig +++ b/arch/mips/pmc-sierra/Kconfig @@ -4,11 +4,13 @@ choice config PMC_MSP4200_EVAL bool "PMC-Sierra MSP4200 Eval Board" + select CEVT_R4K select IRQ_MSP_SLP select HW_HAS_PCI config PMC_MSP4200_GW bool "PMC-Sierra MSP4200 VoIP Gateway" + select CEVT_R4K select IRQ_MSP_SLP select HW_HAS_PCI diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c index f221d47..7cfeda5 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_time.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c @@ -86,8 +86,5 @@ void __init plat_timer_setup(struct irqaction *irq) #ifdef CONFIG_IRQ_MSP_CIC /* we are using the vpe0 counter for timer interrupts */ setup_irq(MSP_INT_VPE0_TIMER, irq); -#else - /* we are using the mips counter for timer interrupts */ - setup_irq(MSP_INT_TIMER, irq); #endif } diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c index 015fcc3..855977c 100644 --- a/arch/mips/pmc-sierra/yosemite/setup.c +++ b/arch/mips/pmc-sierra/yosemite/setup.c @@ -137,11 +137,6 @@ int rtc_mips_set_time(unsigned long tim) return 0; } -void __init plat_timer_setup(struct irqaction *irq) -{ - setup_irq(7, irq); -} - void __init plat_time_init(void) { mips_hpt_frequency = cpu_clock_freq / 2; diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 856649c..1bb692a 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -374,14 +374,13 @@ int __devinit request_bridge_irq(struct bridge_controller *bc) return irq; } -extern void ip27_rt_timer_interrupt(void); - asmlinkage void plat_irq_dispatch(void) { unsigned long pending = read_c0_cause() & read_c0_status(); + extern unsigned int rt_timer_irq; if (pending & CAUSEF_IP4) - ip27_rt_timer_interrupt(); + do_IRQ(rt_timer_irq); else if (pending & CAUSEF_IP2) /* PI_INT_PEND_0 or CC_PEND_{A|B} */ ip27_do_irq_mask0(); else if (pending & CAUSEF_IP3) /* PI_INT_PEND_1 */ diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index b7b3479..d467bf4 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -3,6 +3,7 @@ * Copytight (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/bcd.h> +#include <linux/clockchips.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> @@ -25,22 +26,8 @@ #include <asm/sn/sn0/ip27.h> #include <asm/sn/sn0/hub.h> -/* - * This is a hack; we really need to figure these values out dynamically - * - * Since 800 ns works very well with various HUB frequencies, such as - * 360, 380, 390 and 400 MHZ, we use 800 ns rtc cycle time. - * - * Ralf: which clock rate is used to feed the counter? 
- */ -#define NSEC_PER_CYCLE 800 -#define CYCLES_PER_SEC (NSEC_PER_SEC/NSEC_PER_CYCLE) -#define CYCLES_PER_JIFFY (CYCLES_PER_SEC/HZ) - #define TICK_SIZE (tick_nsec / 1000) -static unsigned long ct_cur[NR_CPUS]; /* What counter should be at next timer irq */ - #if 0 static int set_rtc_mmss(unsigned long nowtime) { @@ -86,36 +73,6 @@ static int set_rtc_mmss(unsigned long nowtime) } #endif -static unsigned int rt_timer_irq; - -void ip27_rt_timer_interrupt(void) -{ - int cpu = smp_processor_id(); - int cpuA = cputoslice(cpu) == 0; - unsigned int irq = rt_timer_irq; - - irq_enter(); - write_seqlock(&xtime_lock); - -again: - LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0); /* Ack */ - ct_cur[cpu] += CYCLES_PER_JIFFY; - LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]); - - if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu]) - goto again; - - kstat_this_cpu.irqs[irq]++; /* kstat only for bootcpu? */ - - if (cpu == 0) - do_timer(1); - - update_process_times(user_mode(get_irq_regs())); - - write_sequnlock(&xtime_lock); - irq_exit(); -} - /* Includes for ioc3_init(). */ #include <asm/sn/types.h> #include <asm/sn/sn0/addrs.h> @@ -154,6 +111,46 @@ unsigned long read_persistent_clock(void) return mktime(year, month, date, hour, min, sec); } +static int rt_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + unsigned int cpu = smp_processor_id(); + int slice = cputoslice(cpu) == 0; + unsigned long cnt; + + cnt = LOCAL_HUB_L(PI_RT_COUNT); + cnt += delta; + LOCAL_HUB_S(slice ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, cnt); + + return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0; +} + +static void rt_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + /* The only mode supported */ + break; + + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_RESUME: + /* Nothing to do */ + break; + } +} + +struct clock_event_device rt_clock_event_device = { + .name = "HUB-RT", + .features = CLOCK_EVT_FEAT_ONESHOT, + + .rating = 300, + .set_next_event = rt_set_next_event, + .set_mode = rt_set_mode, +}; + static void enable_rt_irq(unsigned int irq) { } @@ -171,6 +168,20 @@ static struct irq_chip rt_irq_type = { .eoi = enable_rt_irq, }; +unsigned int rt_timer_irq; + +static irqreturn_t ip27_rt_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *cd = &rt_clock_event_device; + unsigned int cpu = smp_processor_id(); + int slice = cputoslice(cpu) == 0; + + LOCAL_HUB_S(slice ? PI_RT_PEND_A : PI_RT_PEND_B, 0); /* Ack */ + cd->event_handler(cd); + + return IRQ_HANDLED; +} + static struct irqaction rt_irqaction = { .handler = (irq_handler_t) ip27_rt_timer_interrupt, .flags = IRQF_DISABLED, @@ -178,26 +189,43 @@ static struct irqaction rt_irqaction = { .name = "timer" }; -void __init plat_timer_setup(struct irqaction *irq) +/* + * This is a hack; we really need to figure these values out dynamically + * + * Since 800 ns works very well with various HUB frequencies, such as + * 360, 380, 390 and 400 MHZ, we use 800 ns rtc cycle time. + * + * Ralf: which clock rate is used to feed the counter? 
+ */ +#define NSEC_PER_CYCLE 800 +#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE) + +static void __init ip27_rt_clock_event_init(void) { - int irqno = allocate_irqno(); + struct clock_event_device *cd = &rt_clock_event_device; + unsigned int cpu = smp_processor_id(); + int irq = allocate_irqno(); - if (irqno < 0) + if (irq < 0) panic("Can't allocate interrupt number for timer interrupt"); - set_irq_chip_and_handler(irqno, &rt_irq_type, handle_percpu_irq); + rt_timer_irq = irq; - /* over-write the handler, we use our own way */ - irq->handler = no_action; + cd->irq = irq, + cd->cpumask = cpumask_of_cpu(cpu), - /* setup irqaction */ - irq_desc[irqno].status |= IRQ_PER_CPU; - - rt_timer_irq = irqno; /* - * Only needed to get /proc/interrupt to display timer irq stats + * Calculate the min / max delta */ - setup_irq(irqno, &rt_irqaction); + cd->mult = + div_sc((unsigned long) CYCLES_PER_SEC, NSEC_PER_SEC, 32); + cd->shift = 32; + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + clockevents_register_device(cd); + + set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); + setup_irq(irq, &rt_irqaction); } static cycle_t hub_rt_read(void) @@ -206,7 +234,7 @@ static cycle_t hub_rt_read(void) } struct clocksource ht_rt_clocksource = { - .name = "HUB", + .name = "HUB-RT", .rating = 200, .read = hub_rt_read, .mask = CLOCKSOURCE_MASK(52), @@ -214,11 +242,17 @@ struct clocksource ht_rt_clocksource = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -void __init plat_time_init(void) +static void __init ip27_rt_clocksource_init(void) { clocksource_register(&ht_rt_clocksource); } +void __init plat_time_init(void) +{ + ip27_rt_clock_event_init(); + ip27_rt_clocksource_init(); +} + void __init cpu_time_init(void) { lboard_t *board; @@ -248,17 +282,12 @@ void __init hub_rtc_init(cnodeid_t cnode) * node and timeouts will not happen there. 
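
[Editor's note] A back-of-envelope check of the HUB-RT clockevent numbers above; the only input is the 800 ns NSEC_PER_CYCLE assumption stated in the comment, everything else follows from it. This is user-space C, not kernel code.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t nsec_per_cycle = 800;	/* NSEC_PER_CYCLE above */
		const uint64_t cycles_per_sec = 1000000000ULL / nsec_per_cycle;
		const uint64_t max_delta = 0x7fffffff;	/* cycles, as used above */
		const uint64_t min_delta = 0x300;	/* cycles, as used above */

		printf("HUB-RT counter frequency: %llu Hz\n",
		       (unsigned long long)cycles_per_sec);
		printf("max_delta ~ %.1f s, min_delta ~ %.1f us\n",
		       (double)max_delta * nsec_per_cycle / 1e9,
		       (double)min_delta * nsec_per_cycle / 1e3);
		return 0;
	}
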
*/ if (get_compact_nodeid() == cnode) { - int cpu = smp_processor_id(); LOCAL_HUB_S(PI_RT_EN_A, 1); LOCAL_HUB_S(PI_RT_EN_B, 1); LOCAL_HUB_S(PI_PROF_EN_A, 0); LOCAL_HUB_S(PI_PROF_EN_B, 0); - ct_cur[cpu] = CYCLES_PER_JIFFY; - LOCAL_HUB_S(PI_RT_COMPARE_A, ct_cur[cpu]); LOCAL_HUB_S(PI_RT_COUNT, 0); LOCAL_HUB_S(PI_RT_PEND_A, 0); - LOCAL_HUB_S(PI_RT_COMPARE_B, ct_cur[cpu]); - LOCAL_HUB_S(PI_RT_COUNT, 0); LOCAL_HUB_S(PI_RT_PEND_B, 0); } } diff --git a/arch/mips/sgi-ip32/ip32-setup.c b/arch/mips/sgi-ip32/ip32-setup.c index fc75bfc..1024bf4 100644 --- a/arch/mips/sgi-ip32/ip32-setup.c +++ b/arch/mips/sgi-ip32/ip32-setup.c @@ -80,12 +80,6 @@ void __init plat_time_init(void) printk("%d MHz CPU detected\n", mips_hpt_frequency * 2 / 1000000); } -void __init plat_timer_setup(struct irqaction *irq) -{ - irq->handler = no_action; - setup_irq(MIPS_CPU_IRQ_BASE + 7, irq); -} - void __init plat_mem_setup(void) { board_be_init = ip32_be_init; diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index 6eac36d..02b266a 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -69,8 +69,9 @@ void bcm1480_smp_init(void) void bcm1480_smp_finish(void) { - extern void bcm1480_time_init(void); - bcm1480_time_init(); + extern void sb1480_clockevent_init(void); + + sb1480_clockevent_init(); local_irq_enable(); } diff --git a/arch/mips/sibyte/bcm1480/time.c b/arch/mips/sibyte/bcm1480/time.c index 5b4bfbb..c730744 100644 --- a/arch/mips/sibyte/bcm1480/time.c +++ b/arch/mips/sibyte/bcm1480/time.c @@ -27,9 +27,8 @@ */ #include <linux/clockchips.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/percpu.h> #include <linux/spinlock.h> -#include <linux/kernel_stat.h> #include <asm/irq.h> #include <asm/addrspace.h> @@ -101,25 +100,36 @@ static void sibyte_set_mode(enum clock_event_mode mode, break; case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */ + case CLOCK_EVT_MODE_RESUME: ; } } -struct clock_event_device sibyte_hpt_clockevent = { - .name = "bcm1480-counter", - .features = CLOCK_EVT_FEAT_PERIODIC, - .set_mode = sibyte_set_mode, - .shift = 32, - .irq = 0, -}; +static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd) +{ + unsigned int cpu = smp_processor_id(); + void __iomem *timer_init; + unsigned int cnt; + int res; + + timer_init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); + cnt = __raw_readq(timer_init); + cnt += delta; + __raw_writeq(cnt, timer_init); + res = ((long)(__raw_readq(timer_init) - cnt ) > 0) ? 
-ETIME : 0; + + return res; +} + +static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) { - struct clock_event_device *cd = &sibyte_hpt_clockevent; unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); /* Reset the timer */ - __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS, + __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG))); cd->event_handler(cd); @@ -140,24 +150,21 @@ static struct irqaction sibyte_counter_irqaction = { * called directly from irq_handler.S when IP[4] is set during an * interrupt */ -static void __init sb1480_clockevent_init(void) +void __cpuinit sb1480_clockevent_init(void) { unsigned int cpu = smp_processor_id(); unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; + struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); - setup_irq(irq, &sibyte_counter_irqaction); -} + cd->name = "bcm1480-counter"; + cd->features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_MODE_ONESHOT; + cd->set_next_event = sibyte_next_event; + cd->set_mode = sibyte_set_mode; + cd->irq = irq; + clockevent_set_clock(cd, BCM1480_HPT_VALUE); -void bcm1480_timer_interrupt(void) -{ - int cpu = smp_processor_id(); - int irq = K_BCM1480_INT_TIMER_0 + cpu; - - /* Reset the timer */ - __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS, - IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG))); - - ll_timer_interrupt(irq); + setup_irq(irq, &sibyte_counter_irqaction); } static cycle_t bcm1480_hpt_read(void) @@ -168,9 +175,26 @@ static cycle_t bcm1480_hpt_read(void) return (jiffies + 1) * (BCM1480_HPT_VALUE / HZ) - count; } +struct clocksource bcm1480_clocksource = { + .name = "MIPS", + .rating = 200, + .read = bcm1480_hpt_read, + .mask = CLOCKSOURCE_MASK(32), + .shift = 32, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +void __init sb1480_clocksource_init(void) +{ + struct clocksource *cs = &bcm1480_clocksource; + + clocksource_set_clock(cs, BCM1480_HPT_VALUE); + clocksource_register(cs); +} + void __init bcm1480_hpt_setup(void) { - clocksource_mips.read = bcm1480_hpt_read; mips_hpt_frequency = BCM1480_HPT_VALUE; + sb1480_clocksource_init(); sb1480_clockevent_init(); } diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index 7659174..500d17e 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c @@ -400,43 +400,11 @@ static void sb1250_kgdb_interrupt(void) #endif /* CONFIG_KGDB */ -static inline void sb1250_timer_interrupt(void) -{ - int cpu = smp_processor_id(); - int irq = K_INT_TIMER_0 + cpu; - - irq_enter(); - kstat_this_cpu.irqs[irq]++; - - write_seqlock(&xtime_lock); - - /* ACK interrupt */ - ____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, - IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG))); - - /* - * call the generic timer interrupt handling - */ - do_timer(1); - - write_sequnlock(&xtime_lock); - - /* - * In UP mode, we call local_timer_interrupt() to do profiling - * and process accouting. - * - * In SMP mode, local_timer_interrupt() is invoked by appropriate - * low-level local timer interrupt handler. 
- */ - local_timer_interrupt(irq); - - irq_exit(); -} - extern void sb1250_mailbox_interrupt(void); asmlinkage void plat_irq_dispatch(void) { + unsigned int cpu = smp_processor_id(); unsigned int pending; /* @@ -454,7 +422,7 @@ asmlinkage void plat_irq_dispatch(void) if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */ do_IRQ(MIPS_CPU_IRQ_BASE + 7); else if (pending & CAUSEF_IP4) - sb1250_timer_interrupt(); + do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */ #ifdef CONFIG_SMP else if (pending & CAUSEF_IP3) diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index c38e1f3..aaa4f30 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -57,8 +57,9 @@ void sb1250_smp_init(void) void sb1250_smp_finish(void) { - extern void sb1250_time_init(void); - sb1250_time_init(); + extern void sb1250_clockevent_init(void); + + sb1250_clockevent_init(); local_irq_enable(); } diff --git a/arch/mips/sibyte/sb1250/time.c b/arch/mips/sibyte/sb1250/time.c index fe11fed..9ef5462 100644 --- a/arch/mips/sibyte/sb1250/time.c +++ b/arch/mips/sibyte/sb1250/time.c @@ -100,6 +100,7 @@ static void sibyte_set_mode(enum clock_event_mode mode, break; case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */ + case CLOCK_EVT_MODE_RESUME: ; } } @@ -144,79 +145,7 @@ static struct irqaction sibyte_irqaction = { .name = "timer", }; -/* - * The general purpose timer ticks at 1 Mhz independent if - * the rest of the system - */ -static void sibyte_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - unsigned int cpu = smp_processor_id(); - void __iomem *timer_cfg, *timer_init; - - timer_cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); - timer_init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - __raw_writeq(0, timer_cfg); - __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, timer_init); - __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, - timer_cfg); - break; - - case CLOCK_EVT_MODE_ONESHOT: - /* Stop the timer until we actually program a shot */ - case CLOCK_EVT_MODE_SHUTDOWN: - __raw_writeq(0, timer_cfg); - break; - - case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */ - ; - } -} - -static int -sibyte_next_event(unsigned long delta, struct clock_event_device *evt) -{ - unsigned int cpu = smp_processor_id(); - void __iomem *timer_cfg, *timer_init; - - timer_cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); - timer_init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); - - __raw_writeq(0, timer_cfg); - __raw_writeq(delta, timer_init); - __raw_writeq(M_SCD_TIMER_ENABLE, timer_cfg); - - return 0; -} - -struct clock_event_device sibyte_hpt_clockevent = { - .name = "sb1250-counter", - .features = CLOCK_EVT_FEAT_PERIODIC, - .set_mode = sibyte_set_mode, - .set_next_event = sibyte_next_event, - .shift = 32, - .irq = 0, -}; - -static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) -{ - struct clock_event_device *cd = &sibyte_hpt_clockevent; - - cd->event_handler(cd); - - return IRQ_HANDLED; -} - -static struct irqaction sibyte_irqaction = { - .handler = sibyte_counter_handler, - .flags = IRQF_DISABLED | IRQF_PERCPU, - .name = "timer", -}; - -static void __init sb1250_clockevent_init(void) +void __cpuinit sb1250_clockevent_init(void) { struct clock_event_device *cd = &sibyte_hpt_clockevent; unsigned int cpu = smp_processor_id(); @@ -249,12 +178,6 @@ static void __init sb1250_clockevent_init(void) clockevents_register_device(cd); } -void __init plat_time_init(void) 
-{ - sb1250_clocksource_init(); - sb1250_clockevent_init(); -} - /* * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over * again. @@ -267,3 +190,26 @@ static cycle_t sb1250_hpt_read(void) return SB1250_HPT_VALUE - count; } + +struct clocksource bcm1250_clocksource = { + .name = "MIPS", + .rating = 200, + .read = sb1250_hpt_read, + .mask = CLOCKSOURCE_MASK(32), + .shift = 32, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +void __init sb1250_clocksource_init(void) +{ + struct clocksource *cs = &bcm1250_clocksource; + + clocksource_set_clock(cs, V_SCD_TIMER_FREQ); + clocksource_register(cs); +} + +void __init plat_time_init(void) +{ + sb1250_clocksource_init(); + sb1250_clockevent_init(); +} diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 8b3ef0e..080c966 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -69,31 +69,6 @@ const char *get_system_type(void) return "SiByte " SIBYTE_BOARD_NAME; } -void __init plat_time_init(void) -{ -#if defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) - /* Setup HPT */ - sb1250_hpt_setup(); -#endif -} - -void __init plat_timer_setup(struct irqaction *irq) -{ - /* - * we don't set up irqaction, because we will deliver timer - * interrupts through low-level (direct) meachanism. - */ - - /* We only need to setup the generic timer */ -#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) - bcm1480_time_init(); -#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) - sb1250_time_init(); -#else -#error invalid SiByte board configuration -#endif -} - int swarm_be_handler(struct pt_regs *regs, int is_fixup) { if (!is_fixup && (regs->cp0_cause & 4)) { diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c index 39bb15f..4df070f 100644 --- a/arch/mips/sni/pcimt.c +++ b/arch/mips/sni/pcimt.c @@ -246,7 +246,7 @@ static void pcimt_hwint1(void) /* * Note: ASIC PCI's builtin interrupt achknowledge feature is * broken. Using it may result in loss of some or all i8259 - * interupts, so don't use PCIMT_INT_ACKNOWLEDGE ... + * interrupts, so don't use PCIMT_INT_ACKNOWLEDGE ... */ irq = i8259_irq(); if (unlikely(irq < 0)) diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index b808773..0910b35 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c @@ -121,15 +121,6 @@ void __init plat_time_init(void) setup_pit_timer(); } -/* - * R4k counter based timer interrupt. 
Works on RM200-225 and possibly - * others but not on RM400 - */ -static void __init sni_cpu_timer_setup(struct irqaction *irq) -{ - setup_irq(SNI_MIPS_IRQ_CPU_TIMER, irq); -} - void __init plat_timer_setup(struct irqaction *irq) { switch (sni_brd_type) { @@ -139,15 +130,6 @@ void __init plat_timer_setup(struct irqaction *irq) case SNI_BRD_MINITOWER: sni_a20r_timer_setup(irq); break; - - case SNI_BRD_PCI_TOWER: - case SNI_BRD_RM200: - case SNI_BRD_PCI_MTOWER: - case SNI_BRD_PCI_DESKTOP: - case SNI_BRD_PCI_TOWER_CPLUS: - case SNI_BRD_PCI_MTOWER_CPLUS: - sni_cpu_timer_setup(irq); - break; } } diff --git a/arch/mips/tx4927/common/tx4927_setup.c b/arch/mips/tx4927/common/tx4927_setup.c index 8ce0989..36c5f20 100644 --- a/arch/mips/tx4927/common/tx4927_setup.c +++ b/arch/mips/tx4927/common/tx4927_setup.c @@ -72,22 +72,6 @@ void __init plat_time_init(void) #endif } -void __init plat_timer_setup(struct irqaction *irq) -{ - setup_irq(TX4927_IRQ_CPU_TIMER, irq); - -#ifdef CONFIG_TOSHIBA_RBTX4927 - { - extern void toshiba_rbtx4927_timer_setup(struct irqaction - *irq); - toshiba_rbtx4927_timer_setup(irq); - } -#endif - - return; -} - - #ifdef DEBUG void print_cp0(char *key, int num, char *name, u32 val) { diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c index b97102a..c7470fb 100644 --- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c +++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c @@ -94,7 +94,6 @@ #define TOSHIBA_RBTX4927_SETUP_EFWFU ( 1 << 3 ) #define TOSHIBA_RBTX4927_SETUP_SETUP ( 1 << 4 ) #define TOSHIBA_RBTX4927_SETUP_TIME_INIT ( 1 << 5 ) -#define TOSHIBA_RBTX4927_SETUP_TIMER_SETUP ( 1 << 6 ) #define TOSHIBA_RBTX4927_SETUP_PCIBIOS ( 1 << 7 ) #define TOSHIBA_RBTX4927_SETUP_PCI1 ( 1 << 8 ) #define TOSHIBA_RBTX4927_SETUP_PCI2 ( 1 << 9 ) @@ -108,7 +107,6 @@ static const u32 toshiba_rbtx4927_setup_debug_flag = (TOSHIBA_RBTX4927_SETUP_NONE | TOSHIBA_RBTX4927_SETUP_INFO | TOSHIBA_RBTX4927_SETUP_WARN | TOSHIBA_RBTX4927_SETUP_EROR | TOSHIBA_RBTX4927_SETUP_EFWFU | TOSHIBA_RBTX4927_SETUP_SETUP | - TOSHIBA_RBTX4927_SETUP_TIME_INIT | TOSHIBA_RBTX4927_SETUP_TIMER_SETUP | TOSHIBA_RBTX4927_SETUP_PCIBIOS | TOSHIBA_RBTX4927_SETUP_PCI1 | TOSHIBA_RBTX4927_SETUP_PCI2 | TOSHIBA_RBTX4927_SETUP_PCI66); #endif @@ -947,14 +945,6 @@ toshiba_rbtx4927_time_init(void) } -void __init toshiba_rbtx4927_timer_setup(struct irqaction *irq) -{ - TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIMER_SETUP, - "-\n"); - TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIMER_SETUP, - "+\n"); -} - static int __init toshiba_rbtx4927_rtc_init(void) { static struct resource __initdata res = { diff --git a/arch/mips/tx4938/common/setup.c b/arch/mips/tx4938/common/setup.c index ab40822..3ba4101 100644 --- a/arch/mips/tx4938/common/setup.c +++ b/arch/mips/tx4938/common/setup.c @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <linux/random.h> #include <linux/irq.h> -#include <asm/bitops.h> +#include <linux/bitops.h> #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/irq.h> @@ -43,8 +43,3 @@ plat_mem_setup(void) { toshiba_rbtx4938_setup(); } - -void __init plat_timer_setup(struct irqaction *irq) -{ - setup_irq(TX4938_IRQ_CPU_TIMER, irq); -} diff --git a/arch/mips/vr41xx/Kconfig b/arch/mips/vr41xx/Kconfig index 8f4d3e7..eeb089f 100644 --- a/arch/mips/vr41xx/Kconfig +++ b/arch/mips/vr41xx/Kconfig @@ -5,6 +5,7 @@ choice config CASIO_E55 bool "CASIO CASSIOPEIA E-10/15/55/65" + select CEVT_R4K select DMA_NONCOHERENT 
select IRQ_CPU select ISA @@ -13,6 +14,7 @@ config CASIO_E55 config IBM_WORKPAD bool "IBM WorkPad z50" + select CEVT_R4K select DMA_NONCOHERENT select IRQ_CPU select ISA @@ -21,6 +23,7 @@ config IBM_WORKPAD config NEC_CMBVR4133 bool "NEC CMB-VR4133" + select CEVT_R4K select DMA_NONCOHERENT select IRQ_CPU select HW_HAS_PCI @@ -29,6 +32,7 @@ config NEC_CMBVR4133 config TANBAC_TB022X bool "TANBAC VR4131 multichip module and TANBAC VR4131DIMM" + select CEVT_R4K select DMA_NONCOHERENT select IRQ_CPU select HW_HAS_PCI @@ -43,6 +47,7 @@ config TANBAC_TB022X config VICTOR_MPC30X bool "Victor MP-C303/304" + select CEVT_R4K select DMA_NONCOHERENT select IRQ_CPU select HW_HAS_PCI @@ -52,6 +57,7 @@ config VICTOR_MPC30X config ZAO_CAPCELLA bool "ZAO Networks Capcella" + select CEVT_R4K select DMA_NONCOHERENT select IRQ_CPU select HW_HAS_PCI diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c index 407cec2..8d760df 100644 --- a/arch/mips/vr41xx/common/init.c +++ b/arch/mips/vr41xx/common/init.c @@ -48,11 +48,6 @@ void __init plat_time_init(void) mips_hpt_frequency = tclock / 4; } -void __init plat_timer_setup(struct irqaction *irq) -{ - setup_irq(TIMER_IRQ, irq); -} - void __init plat_mem_setup(void) { vr41xx_calculate_clock_frequency(); diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 3d73545..b8ef178 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -267,7 +267,7 @@ source "drivers/Kconfig" source "fs/Kconfig" -source "arch/parisc/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" source "arch/parisc/Kconfig.debug" diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index fb35ebc..2ce3806 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -181,7 +181,7 @@ give_sigsegv: si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; - si.si_pid = current->pid; + si.si_pid = task_pid_vnr(current); si.si_uid = current->uid; si.si_addr = &frame->uc; force_sig_info(SIGSEGV, &si, current); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index bbf029a..99fd569 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -219,7 +219,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) return; /* STFU */ printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", - current->comm, current->pid, str, err, regs->iaoq[0]); + current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); #ifdef PRINT_USER_FAULTS /* XXX for debugging only */ show_regs(regs); @@ -252,7 +252,7 @@ KERN_CRIT " || ||\n"); if (err) printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n", - current->comm, current->pid, str, err); + current->comm, task_pid_nr(current), str, err); /* Wot's wrong wif bein' racy? */ if (current->thread.flags & PARISC_KERNEL_DEATH) { @@ -317,7 +317,7 @@ static void handle_break(struct pt_regs *regs) if (unlikely(iir != GDB_BREAK_INSN)) { printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", iir & 31, (iir>>13) & ((1<<13)-1), - current->pid, current->comm); + task_pid_nr(current), current->comm); show_regs(regs); } #endif @@ -747,7 +747,7 @@ void handle_interruption(int code, struct pt_regs *regs) if (user_mode(regs)) { #ifdef PRINT_USER_FAULTS printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n", - current->pid, current->comm); + task_pid_nr(current), current->comm); show_regs(regs); #endif /* SIGBUS, for lack of a better one. 
*/ @@ -772,7 +772,7 @@ void handle_interruption(int code, struct pt_regs *regs) else printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", code); - printk("pid=%d command='%s'\n", current->pid, current->comm); + printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm); show_regs(regs); #endif si.si_signo = SIGSEGV; diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 347bb92..aebf3c1 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -469,7 +469,7 @@ void handle_unaligned(struct pt_regs *regs) && ++unaligned_count < 5) { char buf[256]; sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", - current->comm, current->pid, regs->ior, regs->iaoq[0]); + current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]); printk(KERN_WARNING "%s", buf); #ifdef DEBUG_UNALIGNED show_regs(regs); diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 1c091b4..b2e3e9a 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -211,7 +211,7 @@ bad_area: #ifdef PRINT_USER_FAULTS printk(KERN_DEBUG "\n"); printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", - tsk->pid, tsk->comm, code, address); + task_pid_nr(tsk), tsk->comm, code, address); if (vma) { printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", vma->vm_start, vma->vm_end); diff --git a/arch/parisc/oprofile/Kconfig b/arch/parisc/oprofile/Kconfig deleted file mode 100644 index 5ade198..0000000 --- a/arch/parisc/oprofile/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ - -menu "Profiling support" - depends on EXPERIMENTAL - -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - -endmenu - diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3763f68..18f397ca 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -669,20 +669,7 @@ source "arch/powerpc/sysdev/qe_lib/Kconfig" source "lib/Kconfig" -menu "Instrumentation Support" - -source "arch/powerpc/oprofile/Kconfig" - -config KPROBES - bool "Kprobes" - depends on !BOOKE && !4xx && KALLSYMS && MODULES - help - Kprobes allows you to trap at almost any kernel address and - execute a callback function. register_kprobe() establishes - a probepoint and specifies the callback. Kprobes is useful - for kernel debugging, non-intrusive instrumentation and testing. - If in doubt, say "N". 
-endmenu +source "kernel/Kconfig.instrumentation" source "arch/powerpc/Kconfig.debug" diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 8b47c84..dcd7c02 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig @@ -68,6 +68,7 @@ CONFIG_SYSVIPC_SYSCTL=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 +CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_SYSFS_DEPRECATED=y # CONFIG_RELAY is not set diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 95b823b..8e5988c 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -209,7 +209,6 @@ CONFIG_PM=y # CONFIG_PM_LEGACY is not set CONFIG_PM_DEBUG=y # CONFIG_PM_VERBOSE is not set -# CONFIG_DISABLE_CONSOLE_SUSPEND is not set CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_HIBERNATION=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index bb8d4e4..05582af 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -71,6 +71,7 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_SYSFS_DEPRECATED=y CONFIG_RELAY=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index c09eb8c..62a3840 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -71,6 +71,7 @@ CONFIG_AUDITSYSCALL=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_SYSFS_DEPRECATED=y # CONFIG_RELAY is not set diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 0ae5d57..2c8e756 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -141,6 +141,7 @@ int main(void) DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); + DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr)); DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index 8b4a4ee..f1ee0b3 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -113,7 +113,7 @@ setup_604_hid0: * around #3 and with the same fix we use. We may want to * check if the CPU is using 60x bus mode in which case * the workaround for errata #4 is useless. Also, we may - * want to explicitely clear HID0_NOPDST as this is not + * want to explicitly clear HID0_NOPDST as this is not * needed once we have applied workaround #5 (though it's * not set by Apple's firmware at least). 
*/ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 2250f9e..b0e5deb 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -491,7 +491,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, /* Legacy flags are left to default at this point, * one can then use irq_create_mapping() to - * explicitely change them + * explicitly change them */ ops->map(host, i, i); } diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 858f28a..2a2f3c3 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -1,6 +1,6 @@ /* L2CR functions - Copyright © 1997-1998 by PowerLogix R & D, Inc. + Copyright © 1997-1998 by PowerLogix R & D, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index e60a0c5..c0c8e8c 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -61,45 +61,39 @@ NORET_TYPE void machine_kexec(struct kimage *image) for(;;); } -static int __init early_parse_crashk(char *p) -{ - unsigned long size; - - if (!p) - return 1; - - size = memparse(p, &p); - - if (*p == '@') - crashk_res.start = memparse(p + 1, &p); - else - crashk_res.start = KDUMP_KERNELBASE; - - crashk_res.end = crashk_res.start + size - 1; - - return 0; -} -early_param("crashkernel", early_parse_crashk); - void __init reserve_crashkernel(void) { - unsigned long size; + unsigned long long crash_size, crash_base; + int ret; + + /* this is necessary because of lmb_phys_mem_size() */ + lmb_analyze(); + + /* use common parsing */ + ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(), + &crash_size, &crash_base); + if (ret == 0 && crash_size > 0) { + if (crash_base == 0) + crash_base = KDUMP_KERNELBASE; + crashk_res.start = crash_base; + } else { + /* handle the device tree */ + crash_size = crashk_res.end - crashk_res.start + 1; + } - if (crashk_res.start == 0) + if (crash_size == 0) return; /* We might have got these values via the command line or the * device tree, either way sanitise them now. 
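
[Editor's note] The reserve_crashkernel() rework above replaces the open-coded early_parse_crashk() with the common parse_crashkernel() helper. The user-space sketch below illustrates the "crashkernel=size[@base]" syntax the removed code handled, with memparse()-style K/M/G suffixes; the helper name and the example value are hypothetical, written only to show the parsing idea.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Simplified stand-in for the kernel's memparse(): number plus an
	 * optional K/M/G suffix, endptr advanced past what was consumed. */
	static unsigned long long memparse_sketch(const char *s, char **retptr)
	{
		unsigned long long val = strtoull(s, retptr, 0);

		switch (**retptr) {
		case 'G': case 'g': val <<= 10;	/* fall through */
		case 'M': case 'm': val <<= 10;	/* fall through */
		case 'K': case 'k': val <<= 10; (*retptr)++;
		}
		return val;
	}

	int main(void)
	{
		char arg[] = "128M@32M";	/* hypothetical command-line value */
		char *p = arg;
		unsigned long long size, base = 0;

		size = memparse_sketch(p, &p);
		if (*p == '@')
			base = memparse_sketch(p + 1, &p);

		printf("crash_size=%llu crash_base=%llu\n", size, base);
		return 0;
	}
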
*/ - size = crashk_res.end - crashk_res.start + 1; - if (crashk_res.start != KDUMP_KERNELBASE) printk("Crash kernel location must be 0x%x\n", KDUMP_KERNELBASE); crashk_res.start = KDUMP_KERNELBASE; - size = PAGE_ALIGN(size); - crashk_res.end = crashk_res.start + size - 1; + crash_size = PAGE_ALIGN(crash_size); + crashk_res.end = crashk_res.start + crash_size - 1; /* Crash kernel trumps memory limit */ if (memory_limit && memory_limit <= crashk_res.end) { @@ -108,7 +102,13 @@ void __init reserve_crashkernel(void) memory_limit); } - lmb_reserve(crashk_res.start, size); + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crashk_res.start >> 20), + (unsigned long)(lmb_phys_mem_size() >> 20)); + + lmb_reserve(crashk_res.start, crash_size); } int overlaps_crashkernel(unsigned long start, unsigned long size) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ea6ad7a..b9d8837 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -459,7 +459,7 @@ void show_regs(struct pt_regs * regs) printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); #endif printk("TASK = %p[%d] '%s' THREAD: %p", - current, current->pid, current->comm, task_thread_info(current)); + current, task_pid_nr(current), current->comm, task_thread_info(current)); #ifdef CONFIG_SMP printk(" CPU: %d", smp_processor_id()); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 863a5d6..9eb3284 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -212,23 +212,44 @@ static u64 read_purr(void) } /* + * Read the SPURR on systems that have it, otherwise the purr + */ +static u64 read_spurr(u64 purr) +{ + if (cpu_has_feature(CPU_FTR_SPURR)) + return mfspr(SPRN_SPURR); + return purr; +} + +/* * Account time for a transition between system, hard irq * or soft irq state. */ void account_system_vtime(struct task_struct *tsk) { - u64 now, delta; + u64 now, nowscaled, delta, deltascaled; unsigned long flags; local_irq_save(flags); now = read_purr(); delta = now - get_paca()->startpurr; get_paca()->startpurr = now; + nowscaled = read_spurr(now); + deltascaled = nowscaled - get_paca()->startspurr; + get_paca()->startspurr = nowscaled; if (!in_interrupt()) { + /* deltascaled includes both user and system time. 
+ * Hence scale it based on the purr ratio to estimate + * the system time */ + deltascaled = deltascaled * get_paca()->system_time / + (get_paca()->system_time + get_paca()->user_time); delta += get_paca()->system_time; get_paca()->system_time = 0; } account_system_time(tsk, 0, delta); + get_paca()->purrdelta = delta; + account_system_time_scaled(tsk, deltascaled); + get_paca()->spurrdelta = deltascaled; local_irq_restore(flags); } @@ -240,11 +261,17 @@ void account_system_vtime(struct task_struct *tsk) */ void account_process_vtime(struct task_struct *tsk) { - cputime_t utime; + cputime_t utime, utimescaled; utime = get_paca()->user_time; get_paca()->user_time = 0; account_user_time(tsk, utime); + + /* Estimate the scaled utime by scaling the real utime based + * on the last spurr to purr ratio */ + utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta; + get_paca()->spurrdelta = get_paca()->purrdelta = 0; + account_user_time_scaled(tsk, utimescaled); } static void account_process_time(struct pt_regs *regs) @@ -266,6 +293,7 @@ struct cpu_purr_data { int initialized; /* thread is running */ u64 tb; /* last TB value read */ u64 purr; /* last PURR value read */ + u64 spurr; /* last SPURR value read */ }; /* diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index bf9e39c..59c464e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -201,7 +201,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) * generate the same exception over and over again and we get * nowhere. Better to kill it and let the kernel panic. */ - if (is_init(current)) { + if (is_global_init(current)) { __sighandler_t handler; spin_lock_irq(&current->sighand->siglock); @@ -881,7 +881,7 @@ void nonrecoverable_exception(struct pt_regs *regs) void trace_syscall(struct pt_regs *regs) { printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", - current, current->pid, regs->nip, regs->link, regs->gpr[0], + current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index ab3546c..a18fda3 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -375,7 +375,7 @@ bad_area_nosemaphore: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/powerpc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig deleted file mode 100644 index 7089e79..0000000 --- a/arch/powerpc/oprofile/Kconfig +++ /dev/null @@ -1,24 +0,0 @@ -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - -config OPROFILE_CELL - bool "OProfile for Cell Broadband Engine" - depends on (SPU_FS = y && OPROFILE = m) || (SPU_FS = y && OPROFILE = y) || (SPU_FS = m && OPROFILE = m) - default y - help - Profiling of Cell BE SPUs requires special support enabled - by this option. 
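The machine_kexec.c rework above drops the private "crashkernel=" parser in favour of the common parse_crashkernel() helper, but the option keeps its crashkernel=size[@base] form, with KDUMP_KERNELBASE used when no base is given. A rough user-space sketch of how such a string decomposes into a size and base follows; it is illustrative only, parse_size() is a hypothetical stand-in for the kernel's memparse(), and the sample string is arbitrary.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for memparse(): a number with an optional K/M/G suffix. */
static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10; (*end)++;
	}
	return v;
}

int main(void)
{
	char arg[] = "64M@32M";		/* e.g. booted with crashkernel=64M@32M */
	char *p = arg;
	unsigned long long size, base = 0;

	size = parse_size(p, &p);
	if (*p == '@')			/* base is optional */
		base = parse_size(p + 1, &p);

	printf("reserve %lluMB at %lluMB\n", size >> 20, base >> 20);
	return 0;
}

In the patch itself, a zero base from the parser is what triggers the fall-back to KDUMP_KERNELBASE before the region is handed to lmb_reserve().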
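The time.c hunks above apportion a SPURR delta between system and user execution using the PURR-based counts already accumulated in the paca. A minimal sketch of that proportioning, assuming a stand-alone structure and made-up tick counts rather than the real paca_struct:

#include <stdio.h>

/* Hypothetical per-CPU accounting state; not the kernel's paca_struct. */
struct paca_like {
	unsigned long long user_time;	/* PURR ticks accrued in user mode */
	unsigned long long system_time;	/* PURR ticks accrued in kernel mode */
};

int main(void)
{
	struct paca_like p = { .user_time = 700, .system_time = 300 };
	unsigned long long deltascaled = 800;	/* SPURR ticks since last update */

	/* As in account_system_vtime(): the SPURR delta covers both user and
	 * system execution, so credit system time with a share proportional
	 * to the PURR-based system/total ratio. */
	unsigned long long system_scaled =
		deltascaled * p.system_time / (p.system_time + p.user_time);

	printf("scaled system share: %llu, remainder left for user: %llu\n",
	       system_scaled, deltascaled - system_scaled);
	return 0;
}

User time is handled the other way round: the remembered spurrdelta/purrdelta ratio scales the accumulated user_time before it reaches account_user_time_scaled().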
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index f26afcd..ffa14af 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c @@ -1,5 +1,5 @@ #include <linux/init.h> -#include <linux/pm.h> +#include <linux/suspend.h> #include <asm/io.h> #include <asm/time.h> #include <asm/mpc52xx.h> @@ -18,6 +18,8 @@ static void __iomem *sram; static const int sram_size = 0x4000; /* 16 kBytes */ static void __iomem *mbar; +static suspend_state_t lite5200_pm_target_state; + static int lite5200_pm_valid(suspend_state_t state) { switch (state) { @@ -29,13 +31,22 @@ static int lite5200_pm_valid(suspend_state_t state) } } -static int lite5200_pm_prepare(suspend_state_t state) +static int lite5200_pm_set_target(suspend_state_t state) +{ + if (lite5200_pm_valid(state)) { + lite5200_pm_target_state = state; + return 0; + } + return -EINVAL; +} + +static int lite5200_pm_prepare(void) { /* deep sleep? let mpc52xx code handle that */ - if (state == PM_SUSPEND_STANDBY) - return mpc52xx_pm_prepare(state); + if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) + return mpc52xx_pm_prepare(); - if (state != PM_SUSPEND_MEM) + if (lite5200_pm_target_state != PM_SUSPEND_MEM) return -EINVAL; /* map registers */ @@ -190,17 +201,16 @@ static int lite5200_pm_enter(suspend_state_t state) return 0; } -static int lite5200_pm_finish(suspend_state_t state) +static void lite5200_pm_finish(void) { /* deep sleep? let mpc52xx code handle that */ - if (state == PM_SUSPEND_STANDBY) { - return mpc52xx_pm_finish(state); - } - return 0; + if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) + mpc52xx_pm_finish(); } -static struct pm_ops lite5200_pm_ops = { +static struct platform_suspend_ops lite5200_pm_ops = { .valid = lite5200_pm_valid, + .set_target = lite5200_pm_set_target, .prepare = lite5200_pm_prepare, .enter = lite5200_pm_enter, .finish = lite5200_pm_finish, @@ -208,6 +218,6 @@ static struct pm_ops lite5200_pm_ops = { int __init lite5200_pm_init(void) { - pm_set_ops(&lite5200_pm_ops); + suspend_set_ops(&lite5200_pm_ops); return 0; } diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c index ee2e763..7ffa7ba 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c @@ -1,5 +1,5 @@ #include <linux/init.h> -#include <linux/pm.h> +#include <linux/suspend.h> #include <linux/io.h> #include <asm/time.h> #include <asm/cacheflush.h> @@ -57,11 +57,8 @@ int mpc52xx_set_wakeup_gpio(u8 pin, u8 level) return 0; } -int mpc52xx_pm_prepare(suspend_state_t state) +int mpc52xx_pm_prepare(void) { - if (state != PM_SUSPEND_STANDBY) - return -EINVAL; - /* map the whole register space */ mbar = mpc52xx_find_and_map("mpc5200"); if (!mbar) { @@ -166,18 +163,16 @@ int mpc52xx_pm_enter(suspend_state_t state) return 0; } -int mpc52xx_pm_finish(suspend_state_t state) +void mpc52xx_pm_finish(void) { /* call board resume code */ if (mpc52xx_suspend.board_resume_finish) mpc52xx_suspend.board_resume_finish(mbar); iounmap(mbar); - - return 0; } -static struct pm_ops mpc52xx_pm_ops = { +static struct platform_suspend_ops mpc52xx_pm_ops = { .valid = mpc52xx_pm_valid, .prepare = mpc52xx_pm_prepare, .enter = mpc52xx_pm_enter, @@ -186,6 +181,6 @@ static struct pm_ops mpc52xx_pm_ops = { int __init mpc52xx_pm_init(void) { - pm_set_ops(&mpc52xx_pm_ops); + suspend_set_ops(&mpc52xx_pm_ops); return 0; } diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 
229d355..ea22cad 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -120,7 +120,7 @@ config PPC_PMI depends on PPC_IBM_CELL_BLADE help PMI (Platform Management Interrupt) is a way to - communicate with the BMC (Baseboard Mangement Controller). + communicate with the BMC (Baseboard Management Controller). It is used in some IBM Cell blades. default m diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index d72b16d..d9e56a5 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -748,7 +748,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, if (count) goto out; - /* write aÑ• much as possible */ + /* write as much as possible */ for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { int ret; ret = __get_user(wbox_data, udata); diff --git a/arch/powerpc/platforms/celleb/scc_uhc.c b/arch/powerpc/platforms/celleb/scc_uhc.c index a7c548b..b59c38a 100644 --- a/arch/powerpc/platforms/celleb/scc_uhc.c +++ b/arch/powerpc/platforms/celleb/scc_uhc.c @@ -36,7 +36,7 @@ static inline int uhc_clkctrl_ready(u32 val) } /* - * UHC(usb host controler) enable function. + * UHC(usb host controller) enable function. * affect to both of OHCI and EHCI core module. */ static void enable_scc_uhc(struct pci_dev *dev) diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 354c058..144177d 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -41,13 +41,13 @@ #include <linux/root_dev.h> #include <linux/serial.h> #include <linux/smp.h> +#include <linux/bitops.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/prom.h> #include <asm/system.h> #include <asm/pgtable.h> -#include <asm/bitops.h> #include <asm/io.h> #include <asm/kexec.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 3a393c7..a1ab25c 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -332,7 +332,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err) err->disposition == RTAS_DISP_NOT_RECOVERED && err->target == RTAS_TARGET_MEMORY && err->type == RTAS_TYPE_ECC_UNCORR && - !(current->pid == 0 || is_init(current))) { + !(current->pid == 0 || is_global_init(current))) { /* Kill off a user process with an ECC error */ printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n", current->pid); diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index 607925c..6473fa7 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig @@ -1317,7 +1317,7 @@ endmenu source "lib/Kconfig" -source "arch/powerpc/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" source "arch/ppc/Kconfig.debug" diff --git a/arch/ppc/boot/Makefile b/arch/ppc/boot/Makefile index 487dc66..500497e 100644 --- a/arch/ppc/boot/Makefile +++ b/arch/ppc/boot/Makefile @@ -13,6 +13,8 @@ # modified by Cort (cort@cs.nmt.edu) # +# KBUILD_CFLAGS used when building rest of boot (takes effect recursively) +KBUILD_CFLAGS += -fno-builtin -D__BOOTER__ -Iarch/$(ARCH)/boot/include HOSTCFLAGS += -Iarch/$(ARCH)/boot/include BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c index 3f3b292..c785689 100644 --- a/arch/ppc/kernel/traps.c +++ b/arch/ppc/kernel/traps.c @@ -121,7 +121,7 @@ void _exception(int signr, struct pt_regs *regs, int code, 
unsigned long addr) * generate the same exception over and over again and we get * nowhere. Better to kill it and let the kernel panic. */ - if (is_init(current)) { + if (is_global_init(current)) { __sighandler_t handler; spin_lock_irq(&current->sighand->siglock); diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c index 94913dd..254c23b 100644 --- a/arch/ppc/mm/fault.c +++ b/arch/ppc/mm/fault.c @@ -290,7 +290,7 @@ bad_area: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c index 248684f..dcd6070 100644 --- a/arch/ppc/platforms/chestnut.c +++ b/arch/ppc/platforms/chestnut.c @@ -49,7 +49,6 @@ extern void gen550_progress(char *, unsigned short); extern void gen550_init(int, struct uart_port *); extern void mv64360_pcibios_fixup(mv64x60_handle_t *bh); -#define BIT(x) (1<<x) #define CHESTNUT_PRESERVE_MASK (BIT(MV64x60_CPU2DEV_0_WIN) | \ BIT(MV64x60_CPU2DEV_1_WIN) | \ BIT(MV64x60_CPU2DEV_2_WIN) | \ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b711321..4ec716d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -529,21 +529,7 @@ source "drivers/Kconfig" source "fs/Kconfig" -menu "Instrumentation Support" - -source "arch/s390/oprofile/Kconfig" - -config KPROBES - bool "Kprobes (EXPERIMENTAL)" - depends on EXPERIMENTAL && MODULES - help - Kprobes allows you to trap at almost any kernel address and - execute a callback function. register_kprobe() establishes - a probepoint and specifies the callback. Kprobes is useful - for kernel debugging, non-intrusive instrumentation and testing. - If in doubt, say "N". - -endmenu +source "kernel/Kconfig.instrumentation" source "arch/s390/Kconfig.debug" diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index abb447a..70c5737 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -166,7 +166,7 @@ void show_regs(struct pt_regs *regs) printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted()); printk("Process %s (pid: %d, task: %p, ksp: %p)\n", - current->comm, current->pid, (void *) tsk, + current->comm, task_pid_nr(current), (void *) tsk, (void *) tsk->thread.ksp); show_registers(regs); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 60604b2..b159a9d 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -64,7 +64,7 @@ out: out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 14c241c..2456b52 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -211,7 +211,7 @@ static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code, struct mm_struct *mm = tsk->mm; up_read(&mm->mmap_sem); - if (is_init(tsk)) { + if (is_global_init(tsk)) { yield(); down_read(&mm->mmap_sem); return 1; diff --git a/arch/s390/oprofile/Kconfig b/arch/s390/oprofile/Kconfig deleted file mode 100644 index 208220a..0000000 --- a/arch/s390/oprofile/Kconfig +++ /dev/null @@ -1,22 +0,0 @@ - -menu "Profiling support" - -config PROFILING - bool "Profiling support" - help - Say Y here to enable profiling support mechanisms used by - profilers such as readprofile or OProfile. 
- - -config OPROFILE - tristate "OProfile system profiling" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - -endmenu - diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 44982c1..247f8a6 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -758,7 +758,7 @@ source "drivers/Kconfig" source "fs/Kconfig" -source "arch/sh/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" source "arch/sh/Kconfig.debug" diff --git a/arch/sh/boards/hp6xx/pm.c b/arch/sh/boards/hp6xx/pm.c index 8143d1b..d22f6ea 100644 --- a/arch/sh/boards/hp6xx/pm.c +++ b/arch/sh/boards/hp6xx/pm.c @@ -67,14 +67,14 @@ static int hp6x0_pm_enter(suspend_state_t state) return 0; } -static struct pm_ops hp6x0_pm_ops = { +static struct platform_suspend_ops hp6x0_pm_ops = { .enter = hp6x0_pm_enter, - .valid = pm_valid_only_mem, + .valid = suspend_valid_only_mem, }; static int __init hp6x0_pm_init(void) { - pm_set_ops(&hp6x0_pm_ops); + suspend_set_ops(&hp6x0_pm_ops); return 0; } diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 790ed69..5c17de5 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -104,24 +104,3 @@ NORET_TYPE void machine_kexec(struct kimage *image) (*rnk)(page_list, reboot_code_buffer, image->start, vbr_reg); } -/* crashkernel=size@addr specifies the location to reserve for - * a crash kernel. By reserving this memory we guarantee - * that linux never sets it up as a DMA target. - * Useful for holding code to do something appropriate - * after a kernel panic. - */ -static int __init parse_crashkernel(char *arg) -{ - unsigned long size, base; - size = memparse(arg, &arg); - if (*arg == '@') { - base = memparse(arg+1, &arg); - /* FIXME: Do I want a sanity check - * to validate the memory range? 
- */ - crashk_res.start = base; - crashk_res.end = base + size - 1; - } - return 0; -} -early_param("crashkernel", parse_crashkernel); diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index b446999..6d7f2b0 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -121,7 +121,7 @@ void machine_power_off(void) void show_regs(struct pt_regs * regs) { printk("\n"); - printk("Pid : %d, Comm: %20s\n", current->pid, current->comm); + printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm); print_symbol("PC is at %s\n", instruction_pointer(regs)); printk("PC : %08lx SP : %08lx SR : %08lx ", regs->pc, regs->regs[15], regs->sr); diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index b3027a6..b749403 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -128,6 +128,37 @@ static void __init register_bootmem_low_pages(void) free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages)); } +#ifdef CONFIG_KEXEC +static void __init reserve_crashkernel(void) +{ + unsigned long long free_mem; + unsigned long long crash_size, crash_base; + int ret; + + free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT; + + ret = parse_crashkernel(boot_command_line, free_mem, + &crash_size, &crash_base); + if (ret == 0 && crash_size) { + if (crash_base > 0) { + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(free_mem >> 20)); + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + reserve_bootmem(crash_base, crash_size); + } else + printk(KERN_INFO "crashkernel reservation failed - " + "you have to specify a base address\n"); + } +} +#else +static inline void __init reserve_crashkernel(void) +{} +#endif + void __init setup_bootmem_allocator(unsigned long free_pfn) { unsigned long bootmap_size; @@ -189,11 +220,8 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) } } #endif -#ifdef CONFIG_KEXEC - if (crashk_res.start != crashk_res.end) - reserve_bootmem(crashk_res.start, - crashk_res.end - crashk_res.start + 1); -#endif + + reserve_crashkernel(); } #ifndef CONFIG_NEED_MULTIPLE_NODES diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c index 2f42442..ca754fd 100644 --- a/arch/sh/kernel/signal.c +++ b/arch/sh/kernel/signal.c @@ -382,7 +382,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, set_fs(USER_DS); pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", - current->comm, current->pid, frame, regs->pc, regs->pr); + current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); flush_cache_sigtramp(regs->pr); @@ -462,7 +462,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, set_fs(USER_DS); pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", - current->comm, current->pid, frame, regs->pc, regs->pr); + current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); flush_cache_sigtramp(regs->pr); diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index dcb46e7..cf99111c 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -95,8 +95,8 @@ void die(const char * str, struct pt_regs * regs, long err) print_modules(); show_regs(regs); - printk("Process: %s (pid: %d, stack limit = %p)\n", - current->comm, current->pid, task_stack_page(current) + 1); + printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm, + task_pid_nr(current), task_stack_page(current) + 1); if 
(!user_mode(regs) || in_interrupt()) dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + @@ -386,7 +386,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) printk(KERN_NOTICE "Fixing up unaligned userspace access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", - current->comm,current->pid,(u16*)regs->pc,instruction); + current->comm, task_pid_nr(current), + (u16 *)regs->pc, instruction); } ret = -EFAULT; diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 4729668..f33cedb 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -207,7 +207,7 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/sh/oprofile/Kconfig b/arch/sh/oprofile/Kconfig deleted file mode 100644 index 5ade198..0000000 --- a/arch/sh/oprofile/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ - -menu "Profiling support" - depends on EXPERIMENTAL - -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - -endmenu - diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig index b3327ce..ba204ba 100644 --- a/arch/sh64/Kconfig +++ b/arch/sh64/Kconfig @@ -284,7 +284,7 @@ source "drivers/Kconfig" source "fs/Kconfig" -source "arch/sh64/oprofile/Kconfig" +source "kernel/Kconfig.instrumentation" source "arch/sh64/Kconfig.debug" diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c index 388bb71..b4d9534 100644 --- a/arch/sh64/kernel/pci_sh5.c +++ b/arch/sh64/kernel/pci_sh5.c @@ -480,7 +480,7 @@ static int __init pcibios_init(void) return -EINVAL; } - /* The pci subsytem needs to know where memory is and how much + /* The pci subsystem needs to know where memory is and how much * of it there is. I've simply made these globals. A better mechanism * is probably needed. 
*/ diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c index 9d0d58f..c03101f 100644 --- a/arch/sh64/kernel/traps.c +++ b/arch/sh64/kernel/traps.c @@ -764,7 +764,7 @@ static int misaligned_fixup(struct pt_regs *regs) --user_mode_unaligned_fixup_count; /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */ printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", - current->comm, current->pid, (__u32)regs->pc, opcode); + current->comm, task_pid_nr(current), (__u32)regs->pc, opcode); } else #endif if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) { @@ -774,7 +774,7 @@ static int misaligned_fixup(struct pt_regs *regs) (__u32)regs->pc, opcode); } else { printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", - current->comm, current->pid, (__u32)regs->pc, opcode); + current->comm, task_pid_nr(current), (__u32)regs->pc, opcode); } } diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c index dd81c66..7c79a1b 100644 --- a/arch/sh64/mm/fault.c +++ b/arch/sh64/mm/fault.c @@ -81,7 +81,7 @@ static inline void print_vma(struct vm_area_struct *vma) static inline void print_task(struct task_struct *tsk) { - printk("Task pid %d\n", tsk->pid); + printk("Task pid %d\n", task_pid_nr(tsk)); } static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address) @@ -272,13 +272,13 @@ bad_area: * usermode, so only need a few */ count++; printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n", - address, current->pid, current->comm, + address, task_pid_nr(current), current->comm, (unsigned long) regs->pc); #if 0 show_regs(regs); #endif } - if (is_init(tsk)) { + if (is_global_init(tsk)) { panic("INIT had user mode bad_area\n"); } tsk->thread.address = address; @@ -320,14 +320,14 @@ no_context: * us unable to handle the page fault gracefully. */ out_of_memory: - if (is_init(current)) { + if (is_global_init(current)) { panic("INIT out of memory\n"); yield(); goto survive; } printk("fault:Out of memory\n"); up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/sh64/oprofile/Kconfig b/arch/sh64/oprofile/Kconfig deleted file mode 100644 index 19d3773..0000000 --- a/arch/sh64/oprofile/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ - -menu "Profiling support" - depends on EXPERIMENTAL - -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. 
- -endmenu - diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index c0f4ba1..527adc8 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -320,11 +320,7 @@ endmenu source "fs/Kconfig" -menu "Instrumentation Support" - -source "arch/sparc/oprofile/Kconfig" - -endmenu +source "kernel/Kconfig.instrumentation" source "arch/sparc/Kconfig.debug" diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c index fb2caef..3ea000d 100644 --- a/arch/sparc/kernel/of_device.c +++ b/arch/sparc/kernel/of_device.c @@ -585,24 +585,6 @@ static int __init of_debug(char *str) __setup("of_debug=", of_debug); -int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) -{ - /* initialize common driver fields */ - if (!drv->driver.name) - drv->driver.name = drv->name; - if (!drv->driver.owner) - drv->driver.owner = drv->owner; - drv->driver.bus = bus; - - /* register with core */ - return driver_register(&drv->driver); -} - -void of_unregister_driver(struct of_platform_driver *drv) -{ - driver_unregister(&drv->driver); -} - struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id, struct device *parent, @@ -628,6 +610,4 @@ struct of_device* of_platform_device_create(struct device_node *np, return dev; } -EXPORT_SYMBOL(of_register_driver); -EXPORT_SYMBOL(of_unregister_driver); EXPORT_SYMBOL(of_platform_device_create); diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c index 003f8ee..fe562db 100644 --- a/arch/sparc/kernel/ptrace.c +++ b/arch/sparc/kernel/ptrace.c @@ -155,7 +155,7 @@ static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset, /* Rest of them are completely unsupported. */ default: printk("%s [%d]: Wants to read user offset %ld\n", - current->comm, current->pid, offset); + current->comm, task_pid_nr(current), offset); pt_error_return(regs, EIO); return; } @@ -222,7 +222,7 @@ static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset, /* Rest of them are completely unsupported or "no-touch". 
*/ default: printk("%s [%d]: Wants to write user offset %ld\n", - current->comm, current->pid, offset); + current->comm, task_pid_nr(current), offset); goto failure; } success: diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c index 6c0221e..42bf09d 100644 --- a/arch/sparc/kernel/sys_sparc.c +++ b/arch/sparc/kernel/sys_sparc.c @@ -357,7 +357,7 @@ c_sys_nis_syscall (struct pt_regs *regs) if (count++ > 5) return -ENOSYS; printk ("%s[%d]: Unimplemented SPARC system call %d\n", - current->comm, current->pid, (int)regs->u_regs[1]); + current->comm, task_pid_nr(current), (int)regs->u_regs[1]); #ifdef DEBUG_UNIMP_SYSCALL show_regs (regs); #endif diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c index f807172..28c187c 100644 --- a/arch/sparc/kernel/sys_sunos.c +++ b/arch/sparc/kernel/sys_sunos.c @@ -866,7 +866,7 @@ asmlinkage int sunos_killpg(int pgrp, int sig) rcu_read_lock(); ret = -EINVAL; if (pgrp > 0) - ret = kill_pgrp(find_pid(pgrp), sig, 0); + ret = kill_pgrp(find_vpid(pgrp), sig, 0); rcu_read_unlock(); return ret; diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c index 3bc3bff..d404e79 100644 --- a/arch/sparc/kernel/traps.c +++ b/arch/sparc/kernel/traps.c @@ -38,7 +38,7 @@ struct trap_trace_entry trapbuf[1024]; void syscall_trace_entry(struct pt_regs *regs) { - printk("%s[%d]: ", current->comm, current->pid); + printk("%s[%d]: ", current->comm, task_pid_nr(current)); printk("scall<%d> (could be %d)\n", (int) regs->u_regs[UREG_G1], (int) regs->u_regs[UREG_I0]); } @@ -99,7 +99,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) " /_| \\__/ |_\\\n" " \\__U_/\n"); - printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter); + printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); show_regs(regs); add_taint(TAINT_DIE); diff --git a/arch/sparc/oprofile/Kconfig b/arch/sparc/oprofile/Kconfig deleted file mode 100644 index d8a8408..0000000 --- a/arch/sparc/oprofile/Kconfig +++ /dev/null @@ -1,17 +0,0 @@ -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 59c4d75..c7a74e3 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -460,20 +460,7 @@ source "drivers/fc4/Kconfig" source "fs/Kconfig" -menu "Instrumentation Support" - -source "arch/sparc64/oprofile/Kconfig" - -config KPROBES - bool "Kprobes (EXPERIMENTAL)" - depends on KALLSYMS && EXPERIMENTAL && MODULES - help - Kprobes allows you to trap at almost any kernel address and - execute a callback function. register_kprobe() establishes - a probepoint and specifies the callback. Kprobes is useful - for kernel debugging, non-intrusive instrumentation and testing. - If in doubt, say "N". 
-endmenu +source "kernel/Kconfig.instrumentation" source "arch/sparc64/Kconfig.debug" diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index f3922e5..2c3bea2 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -877,7 +877,7 @@ void __cpuinit sun4v_register_mondo_queues(int this_cpu) static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask) { unsigned long size = PAGE_ALIGN(qmask + 1); - void *p = __alloc_bootmem_low(size, size, 0); + void *p = __alloc_bootmem(size, size, 0); if (!p) { prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); prom_halt(); @@ -889,7 +889,7 @@ static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask) static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask) { unsigned long size = PAGE_ALIGN(qmask + 1); - void *p = __alloc_bootmem_low(size, size, 0); + void *p = __alloc_bootmem(size, size, 0); if (!p) { prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); @@ -906,7 +906,7 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); - page = alloc_bootmem_low_pages(PAGE_SIZE); + page = alloc_bootmem_pages(PAGE_SIZE); if (!page) { prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); prom_halt(); @@ -953,7 +953,7 @@ void __init init_IRQ(void) kill_prom_timer(); size = sizeof(struct ino_bucket) * NUM_IVECS; - ivector_table = alloc_bootmem_low(size); + ivector_table = alloc_bootmem(size); if (!ivector_table) { prom_printf("Fatal error, cannot allocate ivector_table\n"); prom_halt(); diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c index 42d7798..fc5c0cc 100644 --- a/arch/sparc64/kernel/of_device.c +++ b/arch/sparc64/kernel/of_device.c @@ -869,26 +869,6 @@ static int __init of_debug(char *str) __setup("of_debug=", of_debug); -int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) -{ - /* initialize common driver fields */ - if (!drv->driver.name) - drv->driver.name = drv->name; - if (!drv->driver.owner) - drv->driver.owner = drv->owner; - drv->driver.bus = bus; - - /* register with core */ - return driver_register(&drv->driver); -} -EXPORT_SYMBOL(of_register_driver); - -void of_unregister_driver(struct of_platform_driver *drv) -{ - driver_unregister(&drv->driver); -} -EXPORT_SYMBOL(of_unregister_driver); - struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id, struct device *parent, diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index c76bfbb..923e0bc 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -396,6 +396,13 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm) saw_mem = saw_io = 0; pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i); + if (!pbm_ranges) { + prom_printf("PCI: Fatal error, missing PBM ranges property " + " for %s\n", + pbm->name); + prom_halt(); + } + num_pbm_ranges = i / sizeof(*pbm_ranges); for (i = 0; i < num_pbm_ranges; i++) { diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c index 8f7a06e..170d6ca 100644 --- a/arch/sparc64/kernel/sys_sunos32.c +++ b/arch/sparc64/kernel/sys_sunos32.c @@ -831,7 +831,7 @@ asmlinkage int sunos_killpg(int pgrp, int sig) rcu_read_lock(); ret = -EINVAL; if (pgrp > 0) - ret = kill_pgrp(find_pid(pgrp), sig, 0); + ret = kill_pgrp(find_vpid(pgrp), sig, 0); rcu_read_unlock(); return ret; diff --git a/arch/sparc64/kernel/traps.c 
b/arch/sparc64/kernel/traps.c index 34573a5..e9c7e4f 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -2225,7 +2225,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) " /_| \\__/ |_\\\n" " \\__U_/\n"); - printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter); + printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV); __asm__ __volatile__("flushw"); __show_regs(regs); diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S index 9633750..70ac418 100644 --- a/arch/sparc64/lib/atomic.S +++ b/arch/sparc64/lib/atomic.S @@ -1,10 +1,10 @@ -/* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $ - * atomic.S: These things are too big to do inline. +/* atomic.S: These things are too big to do inline. * - * Copyright (C) 1999 David S. Miller (davem@redhat.com) + * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) */ #include <asm/asi.h> +#include <asm/backoff.h> .text @@ -16,27 +16,31 @@ .globl atomic_add .type atomic_add,#function atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 add %g1, %o0, %g7 cas [%o1], %g1, %g7 cmp %g1, %g7 - bne,pn %icc, 1b + bne,pn %icc, 2f nop retl nop +2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_add, .-atomic_add .globl atomic_sub .type atomic_sub,#function atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 sub %g1, %o0, %g7 cas [%o1], %g1, %g7 cmp %g1, %g7 - bne,pn %icc, 1b + bne,pn %icc, 2f nop retl nop +2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_sub, .-atomic_sub /* On SMP we need to use memory barriers to ensure @@ -60,89 +64,101 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ .globl atomic_add_ret .type atomic_add_ret,#function atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) ATOMIC_PRE_BARRIER 1: lduw [%o1], %g1 add %g1, %o0, %g7 cas [%o1], %g1, %g7 cmp %g1, %g7 - bne,pn %icc, 1b + bne,pn %icc, 2f add %g7, %o0, %g7 sra %g7, 0, %o0 ATOMIC_POST_BARRIER retl nop +2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_add_ret, .-atomic_add_ret .globl atomic_sub_ret .type atomic_sub_ret,#function atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) ATOMIC_PRE_BARRIER 1: lduw [%o1], %g1 sub %g1, %o0, %g7 cas [%o1], %g1, %g7 cmp %g1, %g7 - bne,pn %icc, 1b + bne,pn %icc, 2f sub %g7, %o0, %g7 sra %g7, 0, %o0 ATOMIC_POST_BARRIER retl nop +2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_sub_ret, .-atomic_sub_ret .globl atomic64_add .type atomic64_add,#function atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 add %g1, %o0, %g7 casx [%o1], %g1, %g7 cmp %g1, %g7 - bne,pn %xcc, 1b + bne,pn %xcc, 2f nop retl nop +2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_add, .-atomic64_add .globl atomic64_sub .type atomic64_sub,#function atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 sub %g1, %o0, %g7 casx [%o1], %g1, %g7 cmp %g1, %g7 - bne,pn %xcc, 1b + bne,pn %xcc, 2f nop retl nop +2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_sub, .-atomic64_sub .globl atomic64_add_ret .type atomic64_add_ret,#function atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) ATOMIC_PRE_BARRIER 1: ldx [%o1], %g1 add %g1, %o0, %g7 casx [%o1], %g1, %g7 cmp %g1, %g7 - bne,pn %xcc, 1b + bne,pn %xcc, 2f add %g7, %o0, %g7 mov %g7, %o0 ATOMIC_POST_BARRIER retl nop +2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_add_ret, 
.-atomic64_add_ret .globl atomic64_sub_ret .type atomic64_sub_ret,#function atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) ATOMIC_PRE_BARRIER 1: ldx [%o1], %g1 sub %g1, %o0, %g7 casx [%o1], %g1, %g7 cmp %g1, %g7 - bne,pn %xcc, 1b + bne,pn %xcc, 2f sub %g7, %o0, %g7 mov %g7, %o0 ATOMIC_POST_BARRIER retl nop +2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_sub_ret, .-atomic64_sub_ret diff --git a/arch/sparc64/lib/bitops.S b/arch/sparc64/lib/bitops.S index 892431a..6b015a6 100644 --- a/arch/sparc64/lib/bitops.S +++ b/arch/sparc64/lib/bitops.S @@ -1,10 +1,10 @@ -/* $Id: bitops.S,v 1.3 2001/11/18 00:12:56 davem Exp $ - * bitops.S: Sparc64 atomic bit operations. +/* bitops.S: Sparc64 atomic bit operations. * - * Copyright (C) 2000 David S. Miller (davem@redhat.com) + * Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net) */ #include <asm/asi.h> +#include <asm/backoff.h> .text @@ -29,6 +29,7 @@ .globl test_and_set_bit .type test_and_set_bit,#function test_and_set_bit: /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) BITOP_PRE_BARRIER srlx %o0, 6, %g1 mov 1, %o2 @@ -40,18 +41,20 @@ test_and_set_bit: /* %o0=nr, %o1=addr */ or %g7, %o2, %g1 casx [%o1], %g7, %g1 cmp %g7, %g1 - bne,pn %xcc, 1b + bne,pn %xcc, 2f and %g7, %o2, %g2 clr %o0 movrne %g2, 1, %o0 BITOP_POST_BARRIER retl nop +2: BACKOFF_SPIN(%o3, %o4, 1b) .size test_and_set_bit, .-test_and_set_bit .globl test_and_clear_bit .type test_and_clear_bit,#function test_and_clear_bit: /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) BITOP_PRE_BARRIER srlx %o0, 6, %g1 mov 1, %o2 @@ -63,18 +66,20 @@ test_and_clear_bit: /* %o0=nr, %o1=addr */ andn %g7, %o2, %g1 casx [%o1], %g7, %g1 cmp %g7, %g1 - bne,pn %xcc, 1b + bne,pn %xcc, 2f and %g7, %o2, %g2 clr %o0 movrne %g2, 1, %o0 BITOP_POST_BARRIER retl nop +2: BACKOFF_SPIN(%o3, %o4, 1b) .size test_and_clear_bit, .-test_and_clear_bit .globl test_and_change_bit .type test_and_change_bit,#function test_and_change_bit: /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) BITOP_PRE_BARRIER srlx %o0, 6, %g1 mov 1, %o2 @@ -86,18 +91,20 @@ test_and_change_bit: /* %o0=nr, %o1=addr */ xor %g7, %o2, %g1 casx [%o1], %g7, %g1 cmp %g7, %g1 - bne,pn %xcc, 1b + bne,pn %xcc, 2f and %g7, %o2, %g2 clr %o0 movrne %g2, 1, %o0 BITOP_POST_BARRIER retl nop +2: BACKOFF_SPIN(%o3, %o4, 1b) .size test_and_change_bit, .-test_and_change_bit .globl set_bit .type set_bit,#function set_bit: /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) srlx %o0, 6, %g1 mov 1, %o2 sllx %g1, 3, %g3 @@ -108,15 +115,17 @@ set_bit: /* %o0=nr, %o1=addr */ or %g7, %o2, %g1 casx [%o1], %g7, %g1 cmp %g7, %g1 - bne,pn %xcc, 1b + bne,pn %xcc, 2f nop retl nop +2: BACKOFF_SPIN(%o3, %o4, 1b) .size set_bit, .-set_bit .globl clear_bit .type clear_bit,#function clear_bit: /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) srlx %o0, 6, %g1 mov 1, %o2 sllx %g1, 3, %g3 @@ -127,15 +136,17 @@ clear_bit: /* %o0=nr, %o1=addr */ andn %g7, %o2, %g1 casx [%o1], %g7, %g1 cmp %g7, %g1 - bne,pn %xcc, 1b + bne,pn %xcc, 2f nop retl nop +2: BACKOFF_SPIN(%o3, %o4, 1b) .size clear_bit, .-clear_bit .globl change_bit .type change_bit,#function change_bit: /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) srlx %o0, 6, %g1 mov 1, %o2 sllx %g1, 3, %g3 @@ -146,8 +157,9 @@ change_bit: /* %o0=nr, %o1=addr */ xor %g7, %o2, %g1 casx [%o1], %g7, %g1 cmp %g7, %g1 - bne,pn %xcc, 1b + bne,pn %xcc, 2f nop retl nop +2: BACKOFF_SPIN(%o3, %o4, 1b) .size change_bit, .-change_bit diff --git a/arch/sparc64/oprofile/Kconfig b/arch/sparc64/oprofile/Kconfig deleted file mode 100644 index d8a8408..0000000 --- 
a/arch/sparc64/oprofile/Kconfig +++ /dev/null @@ -1,17 +0,0 @@ -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c index 3b67de7..c86cb30 100644 --- a/arch/sparc64/solaris/misc.c +++ b/arch/sparc64/solaris/misc.c @@ -415,7 +415,7 @@ asmlinkage int solaris_procids(int cmd, s32 pid, s32 pgid) switch (cmd) { case 0: /* getpgrp */ - return process_group(current); + return task_pgrp_nr(current); case 1: /* setpgrp */ { int (*sys_setpgid)(pid_t,pid_t) = @@ -426,7 +426,7 @@ asmlinkage int solaris_procids(int cmd, s32 pid, s32 pgid) ret = sys_setpgid(0, 0); if (ret) return ret; proc_clear_tty(current); - return process_group(current); + return task_pgrp_nr(current); } case 2: /* getsid */ { diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 740d8a9..dd1689b 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -3,7 +3,7 @@ config DEFCONFIG_LIST option defconfig_list default "arch/$ARCH/defconfig" -# UML uses the generic IRQ sugsystem +# UML uses the generic IRQ subsystem config GENERIC_HARDIRQS bool default y @@ -289,4 +289,6 @@ config INPUT bool default n +source "kernel/Kconfig.instrumentation" + source "arch/um/Kconfig.debug" diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 76fe0b0..83bf15a 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -35,7 +35,7 @@ static void line_timer_cb(struct work_struct *work) /* * Returns the free space inside the ring buffer of this line. * - * Should be called while holding line->lock (this does not modify datas). + * Should be called while holding line->lock (this does not modify data). 
*/ static int write_room(struct line *line) { diff --git a/arch/um/drivers/null.c b/arch/um/drivers/null.c index 21ad3d7..2b45a14 100644 --- a/arch/um/drivers/null.c +++ b/arch/um/drivers/null.c @@ -9,7 +9,7 @@ #include "chan_user.h" #include "os.h" -/* This address is used only as a unique identifer */ +/* This address is used only as a unique identifier */ static int null_chan; static void *null_init(char *str, int device, const struct chan_opts *opts) diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c index ae67e71..6b4a0f9 100644 --- a/arch/um/drivers/slip_kern.c +++ b/arch/um/drivers/slip_kern.c @@ -31,10 +31,8 @@ void slip_init(struct net_device *dev, void *data) slip_proto_init(&spri->slip); dev->init = NULL; - dev->header_cache_update = NULL; - dev->hard_header_cache = NULL; - dev->hard_header = NULL; dev->hard_header_len = 0; + dev->header_ops = NULL; dev->addr_len = 0; dev->type = ARPHRD_SLIP; dev->tx_queue_len = 256; diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c index 240ee65..d987af2 100644 --- a/arch/um/drivers/slirp_kern.c +++ b/arch/um/drivers/slirp_kern.c @@ -34,9 +34,7 @@ void slirp_init(struct net_device *dev, void *data) dev->init = NULL; dev->hard_header_len = 0; - dev->header_cache_update = NULL; - dev->hard_header_cache = NULL; - dev->hard_header = NULL; + dev->header_ops = NULL; dev->addr_len = 0; dev->type = ARPHRD_SLIP; dev->tx_queue_len = 256; diff --git a/arch/um/drivers/stderr_console.c b/arch/um/drivers/stderr_console.c index 4739dd5..d07a97f 100644 --- a/arch/um/drivers/stderr_console.c +++ b/arch/um/drivers/stderr_console.c @@ -8,7 +8,7 @@ /* trivial console driver -- simply dump everything to stderr */ /* - * Don't register by default -- as this registeres very early in the + * Don't register by default -- as this registers very early in the * boot process it becomes the default console. * * Initialized at init time. diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c index 13aa115..734f873 100644 --- a/arch/um/kernel/gmon_syms.c +++ b/arch/um/kernel/gmon_syms.c @@ -12,8 +12,8 @@ EXPORT_SYMBOL(__bb_init_func); * versions in libgcov. * * Since SuSE backported the fix, we cannot handle it depending on GCC version. - * So, unconditinally export it. But also give it a weak declaration, which will - * be overriden by any other one. + * So, unconditionally export it. But also give it a weak declaration, which will + * be overridden by any other one. */ extern void __gcov_init(void *) __attribute__((weak)); diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 277fce1..70c2d62 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -326,7 +326,7 @@ int deactivate_all_fds(void) } /* - * do_IRQ handles all normal device IRQ's (the special + * do_IRQ handles all normal device IRQs (the special * SMP cross-CPU interrupts have their own specific * handlers). */ diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index a0eba08..47b57b4 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -237,7 +237,7 @@ void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs, /* User-mode eip? */ info.si_addr = UPT_IS_USER(regs) ? 
(void __user *) UPT_IP(regs) : NULL; - /* Send us the fakey SIGTRAP */ + /* Send us the fake SIGTRAP */ force_sig_info(SIGTRAP, &info, tsk); } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index bd06055..cb3321f 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -108,7 +108,7 @@ out_nosemaphore: * us unable to handle the page fault gracefully. */ out_of_memory: - if (is_init(current)) { + if (is_global_init(current)) { up_read(&mm->mmap_sem); yield(); down_read(&mm->mmap_sem); diff --git a/arch/um/sys-i386/bug.c b/arch/um/sys-i386/bug.c index 200c8ba..a4360b5 100644 --- a/arch/um/sys-i386/bug.c +++ b/arch/um/sys-i386/bug.c @@ -6,7 +6,7 @@ #include <linux/uaccess.h> /* Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because - * that's not relevent in skas mode. + * that's not relevant in skas mode. */ int is_valid_bugaddr(unsigned long eip) diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c index b02266a..fcaff86 100644 --- a/arch/um/sys-i386/tls.c +++ b/arch/um/sys-i386/tls.c @@ -45,7 +45,7 @@ int do_get_thread_area(struct user_desc *info) * XXX: Consider leaving one free slot for glibc usage at first place. This must * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else. * - * Also, this must be tested when compiling in SKAS mode with dinamic linking + * Also, this must be tested when compiling in SKAS mode with dynamic linking * and running against NPTL. */ static int get_free_idx(struct task_struct* task) diff --git a/arch/um/sys-x86_64/bug.c b/arch/um/sys-x86_64/bug.c index 200c8ba..a4360b5 100644 --- a/arch/um/sys-x86_64/bug.c +++ b/arch/um/sys-x86_64/bug.c @@ -6,7 +6,7 @@ #include <linux/uaccess.h> /* Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because - * that's not relevent in skas mode. + * that's not relevant in skas mode. */ int is_valid_bugaddr(unsigned long eip) diff --git a/arch/um/sys-x86_64/sysrq.c b/arch/um/sys-x86_64/sysrq.c index ce3e07f..7654440 100644 --- a/arch/um/sys-x86_64/sysrq.c +++ b/arch/um/sys-x86_64/sysrq.c @@ -15,8 +15,8 @@ void __show_regs(struct pt_regs * regs) { printk("\n"); print_modules(); - printk("Pid: %d, comm: %.20s %s %s\n", - current->pid, current->comm, print_tainted(), init_utsname()->release); + printk("Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current), + current->comm, print_tainted(), init_utsname()->release); printk("RIP: %04lx:[<%016lx>] ", PT_REGS_CS(regs) & 0xffff, PT_REGS_RIP(regs)); printk("\nRSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs), diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig index ace479a..b6a50b8 100644 --- a/arch/v850/Kconfig +++ b/arch/v850/Kconfig @@ -331,6 +331,8 @@ source "sound/Kconfig" source "drivers/usb/Kconfig" +source "kernel/Kconfig.instrumentation" + source "arch/v850/Kconfig.debug" source "security/Kconfig" diff --git a/arch/v850/kernel/me2.c b/arch/v850/kernel/me2.c index 38be5c1..007115d 100644 --- a/arch/v850/kernel/me2.c +++ b/arch/v850/kernel/me2.c @@ -58,13 +58,13 @@ void __init me2_init_irqs (void) void me2_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud) { if (chan == 0) { - /* Specify that the relevent pins on the chip should do + /* Specify that the relevant pins on the chip should do serial I/O, not direct I/O. */ ME2_PORT1_PMC |= 0xC; /* Specify that we're using the UART, not the CSI device. 
*/ ME2_PORT1_PFC |= 0xC; } else if (chan == 1) { - /* Specify that the relevent pins on the chip should do + /* Specify that the relevant pins on the chip should do serial I/O, not direct I/O. */ ME2_PORT2_PMC |= 0x6; /* Specify that we're using the UART, not the CSI device. */ diff --git a/arch/v850/kernel/rte_mb_a_pci.c b/arch/v850/kernel/rte_mb_a_pci.c index 35a4bd5..7165478 100644 --- a/arch/v850/kernel/rte_mb_a_pci.c +++ b/arch/v850/kernel/rte_mb_a_pci.c @@ -179,7 +179,7 @@ static int __devinit pcibios_init (void) default uses. */ /* Significant address bits used for decoding PCI GCS5 space - accessess. */ + accesses. */ MB_A_PCI_DMRR = ~(MB_A_PCI_MEM_SIZE - 1); /* I don't understand this, but the SolutionGear example code @@ -775,7 +775,7 @@ pci_alloc_consistent (struct pci_dev *pdev, size_t size, dma_addr_t *dma_addr) /* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must be values that were returned from pci_alloc_consistent. SIZE must be the same as what as passed into pci_alloc_consistent. References to - the memory and mappings assosciated with CPU_ADDR or DMA_ADDR past + the memory and mappings associated with CPU_ADDR or DMA_ADDR past this call are illegal. */ void pci_free_consistent (struct pci_dev *pdev, size_t size, void *cpu_addr, diff --git a/arch/x86/boot/compressed/misc_32.c b/arch/x86/boot/compressed/misc_32.c index b28505c..1dc1e19 100644 --- a/arch/x86/boot/compressed/misc_32.c +++ b/arch/x86/boot/compressed/misc_32.c @@ -25,7 +25,7 @@ /* * Getting to provable safe in place decompression is hard. - * Worst case behaviours need to be analized. + * Worst case behaviours need to be analyzed. * Background information: * * The file layout is: @@ -94,7 +94,7 @@ * Adding 32768 instead of 32767 just makes for round numbers. * Adding the decompressor_size is necessary as it musht live after all * of the data as well. Last I measured the decompressor is about 14K. - * 10K of actuall data and 4K of bss. + * 10K of actual data and 4K of bss. * */ diff --git a/arch/x86/boot/compressed/misc_64.c b/arch/x86/boot/compressed/misc_64.c index f932b0e..6ea015a 100644 --- a/arch/x86/boot/compressed/misc_64.c +++ b/arch/x86/boot/compressed/misc_64.c @@ -25,7 +25,7 @@ /* * Getting to provable safe in place decompression is hard. - * Worst case behaviours need to be analized. + * Worst case behaviours need to be analyzed. * Background information: * * The file layout is: @@ -94,7 +94,7 @@ * Adding 32768 instead of 32767 just makes for round numbers. * Adding the decompressor_size is necessary as it musht live after all * of the data as well. Last I measured the decompressor is about 14K. - * 10K of actuall data and 4K of bss. + * 10K of actual data and 4K of bss. * */ diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c index 118b9f9..55822d2 100644 --- a/arch/x86/ia32/ia32_binfmt.c +++ b/arch/x86/ia32/ia32_binfmt.c @@ -5,10 +5,6 @@ * This tricks binfmt_elf.c into loading 32bit binaries using lots * of ugly preprocessor tricks. Talk about very very poor man's inheritance. 
*/ -#define __ASM_X86_64_ELF_H 1 - -#undef ELF_CLASS -#define ELF_CLASS ELFCLASS32 #include <linux/types.h> #include <linux/stddef.h> @@ -19,6 +15,7 @@ #include <linux/binfmts.h> #include <linux/mm.h> #include <linux/security.h> +#include <linux/elfcore-compat.h> #include <asm/segment.h> #include <asm/ptrace.h> @@ -31,6 +28,20 @@ #include <asm/ia32.h> #include <asm/vsyscall32.h> +#undef ELF_ARCH +#undef ELF_CLASS +#define ELF_CLASS ELFCLASS32 +#define ELF_ARCH EM_386 + +#undef elfhdr +#undef elf_phdr +#undef elf_note +#undef elf_addr_t +#define elfhdr elf32_hdr +#define elf_phdr elf32_phdr +#define elf_note elf32_note +#define elf_addr_t Elf32_Off + #define ELF_NAME "elf/i386" #define AT_SYSINFO 32 @@ -48,74 +59,20 @@ int sysctl_vsyscall32 = 1; } while(0) struct file; -struct elf_phdr; #define IA32_EMULATOR 1 -#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) - -#undef ELF_ARCH -#define ELF_ARCH EM_386 - -#define ELF_DATA ELFDATA2LSB +#undef ELF_ET_DYN_BASE -#define USE_ELF_CORE_DUMP 1 - -/* Override elfcore.h */ -#define _LINUX_ELFCORE_H 1 -typedef unsigned int elf_greg_t; - -#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t)) -typedef elf_greg_t elf_gregset_t[ELF_NGREG]; - -struct elf_siginfo -{ - int si_signo; /* signal number */ - int si_code; /* extra code */ - int si_errno; /* errno */ -}; +#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) #define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0) -struct elf_prstatus -{ - struct elf_siginfo pr_info; /* Info associated with signal */ - short pr_cursig; /* Current signal */ - unsigned int pr_sigpend; /* Set of pending signals */ - unsigned int pr_sighold; /* Set of held signals */ - pid_t pr_pid; - pid_t pr_ppid; - pid_t pr_pgrp; - pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ - struct compat_timeval pr_cutime; /* Cumulative user time */ - struct compat_timeval pr_cstime; /* Cumulative system time */ - elf_gregset_t pr_reg; /* GP registers */ - int pr_fpvalid; /* True if math co-processor being used. 
*/ -}; - -#define ELF_PRARGSZ (80) /* Number of chars for args */ - -struct elf_prpsinfo -{ - char pr_state; /* numeric process state */ - char pr_sname; /* char for pr_state */ - char pr_zomb; /* zombie */ - char pr_nice; /* nice val */ - unsigned int pr_flag; /* flags */ - __u16 pr_uid; - __u16 pr_gid; - pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; - /* Lots missing */ - char pr_fname[16]; /* filename of executable */ - char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ -}; - #define _GET_SEG(x) \ ({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; }) /* Assumes current==process to be dumped */ +#undef ELF_CORE_COPY_REGS #define ELF_CORE_COPY_REGS(pr_reg, regs) \ pr_reg[0] = regs->rbx; \ pr_reg[1] = regs->rcx; \ @@ -135,36 +92,41 @@ struct elf_prpsinfo pr_reg[15] = regs->rsp; \ pr_reg[16] = regs->ss; -#define user user32 + +#define elf_prstatus compat_elf_prstatus +#define elf_prpsinfo compat_elf_prpsinfo +#define elf_fpregset_t struct user_i387_ia32_struct +#define elf_fpxregset_t struct user32_fxsr_struct +#define user user32 #undef elf_read_implies_exec #define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) -//#include <asm/ia32.h> -#include <linux/elf.h> - -typedef struct user_i387_ia32_struct elf_fpregset_t; -typedef struct user32_fxsr_struct elf_fpxregset_t; - -static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) +#define elf_core_copy_regs elf32_core_copy_regs +static inline void elf32_core_copy_regs(compat_elf_gregset_t *elfregs, + struct pt_regs *regs) { - ELF_CORE_COPY_REGS((*elfregs), regs) + ELF_CORE_COPY_REGS((&elfregs->ebx), regs) } -static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs) +#define elf_core_copy_task_regs elf32_core_copy_task_regs +static inline int elf32_core_copy_task_regs(struct task_struct *t, + compat_elf_gregset_t* elfregs) { struct pt_regs *pp = task_pt_regs(t); - ELF_CORE_COPY_REGS((*elfregs), pp); + ELF_CORE_COPY_REGS((&elfregs->ebx), pp); /* fix wrong segments */ - (*elfregs)[7] = t->thread.ds; - (*elfregs)[9] = t->thread.fsindex; - (*elfregs)[10] = t->thread.gsindex; - (*elfregs)[8] = t->thread.es; + elfregs->ds = t->thread.ds; + elfregs->fs = t->thread.fsindex; + elfregs->gs = t->thread.gsindex; + elfregs->es = t->thread.es; return 1; } +#define elf_core_copy_task_fpregs elf32_core_copy_task_fpregs static inline int -elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu) +elf32_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, + elf_fpregset_t *fpu) { struct _fpstate_ia32 *fpstate = (void*)fpu; mm_segment_t oldfs = get_fs(); @@ -186,8 +148,9 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr #define ELF_CORE_COPY_XFPREGS 1 #define ELF_CORE_XFPREG_TYPE NT_PRXFPREG +#define elf_core_copy_task_xfpregs elf32_core_copy_task_xfpregs static inline int -elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) +elf32_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) { struct pt_regs *regs = task_pt_regs(t); if (!tsk_used_math(t)) @@ -206,6 +169,10 @@ elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) extern int force_personality32; +#undef ELF_EXEC_PAGESIZE +#undef ELF_HWCAP +#undef ELF_PLATFORM +#undef SET_PERSONALITY #define ELF_EXEC_PAGESIZE PAGE_SIZE #define ELF_HWCAP (boot_cpu_data.x86_capability[0]) #define ELF_PLATFORM ("i686") @@ -231,6 +198,7 @@ do { \ #define load_elf_binary 
load_elf32_binary +#undef ELF_PLAT_INIT #define ELF_PLAT_INIT(r, load_addr) elf32_init(r) #undef start_thread @@ -289,7 +257,6 @@ static void elf32_init(struct pt_regs *regs) static ctl_table abi_table2[] = { { - .ctl_name = 99, .procname = "vsyscall32", .data = &sysctl_vsyscall32, .maxlen = sizeof(int), diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32 index a3fa11f..ccea590 100644 --- a/arch/x86/kernel/Makefile_32 +++ b/arch/x86/kernel/Makefile_32 @@ -2,7 +2,7 @@ # Makefile for the linux kernel. # -extra-y := head_32.o init_task_32.o vmlinux.lds +extra-y := head_32.o init_task.o vmlinux.lds obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \ ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \ @@ -17,6 +17,7 @@ obj-$(CONFIG_MCA) += mca_32.o obj-$(CONFIG_X86_MSR) += msr.o obj-$(CONFIG_X86_CPUID) += cpuid.o obj-$(CONFIG_MICROCODE) += microcode.o +obj-$(CONFIG_PCI) += early-quirks.o obj-$(CONFIG_APM) += apm_32.o obj-$(CONFIG_X86_SMP) += smp_32.o smpboot_32.o tsc_sync.o obj-$(CONFIG_SMP) += smpcommon_32.o diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64 index 43da662..dec06e7 100644 --- a/arch/x86/kernel/Makefile_64 +++ b/arch/x86/kernel/Makefile_64 @@ -2,7 +2,7 @@ # Makefile for the linux kernel. # -extra-y := head_64.o head64.o init_task_64.o vmlinux.lds +extra-y := head_64.o head64.o init_task.o vmlinux.lds EXTRA_AFLAGS := -traditional obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \ ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \ @@ -39,7 +39,7 @@ obj-$(CONFIG_K8_NB) += k8.o obj-$(CONFIG_AUDIT) += audit_64.o obj-$(CONFIG_MODULES) += module_64.o -obj-$(CONFIG_PCI) += early-quirks_64.o +obj-$(CONFIG_PCI) += early-quirks.o obj-y += topology.o obj-y += intel_cacheinfo.o diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32 index a4852a2..045dd54 100644 --- a/arch/x86/kernel/acpi/Makefile_32 +++ b/arch/x86/kernel/acpi/Makefile_32 @@ -1,7 +1,4 @@ obj-$(CONFIG_ACPI) += boot.o -ifneq ($(CONFIG_PCI),) -obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o -endif obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o ifneq ($(CONFIG_ACPI_PROCESSOR),) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index afd2afe..289247d 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -99,7 +99,7 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; /* * The default interrupt routing model is PIC (8259). This gets - * overriden if IOAPICs are enumerated (below). + * overridden if IOAPICs are enumerated (below). */ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; @@ -414,8 +414,8 @@ acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end * * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. - * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0) - * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0) + * ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0) + * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0) */ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) @@ -427,7 +427,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) old = inb(0x4d0) | (inb(0x4d1) << 8); /* - * If we use ACPI to set PCI irq's, then we should clear ELCR + * If we use ACPI to set PCI IRQs, then we should clear ELCR * since we will set it correctly as we enable the PCI irq * routing. 
*/ @@ -555,7 +555,7 @@ EXPORT_SYMBOL(acpi_map_lsapic); int acpi_unmap_lsapic(int cpu) { - x86_cpu_to_apicid[cpu] = -1; + per_cpu(x86_cpu_to_apicid, cpu) = -1; cpu_clear(cpu, cpu_present_map); num_processors--; diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 2d39f55..10b6717 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -29,7 +29,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, unsigned int cpu) { - struct cpuinfo_x86 *c = cpu_data + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); flags->bm_check = 0; if (num_online_cpus() == 1) @@ -72,7 +72,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, struct acpi_processor_cx *cx, struct acpi_power_register *reg) { struct cstate_entry *percpu_entry; - struct cpuinfo_x86 *c = cpu_data + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); cpumask_t saved_mask; int retval; diff --git a/arch/x86/kernel/acpi/earlyquirk_32.c b/arch/x86/kernel/acpi/earlyquirk_32.c deleted file mode 100644 index 23f78ef..0000000 --- a/arch/x86/kernel/acpi/earlyquirk_32.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Do early PCI probing for bug detection when the main PCI subsystem is - * not up yet. - */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/acpi.h> - -#include <asm/pci-direct.h> -#include <asm/acpi.h> -#include <asm/apic.h> - -#ifdef CONFIG_ACPI - -static int __init nvidia_hpet_check(struct acpi_table_header *header) -{ - return 0; -} -#endif - -static int __init check_bridge(int vendor, int device) -{ -#ifdef CONFIG_ACPI - static int warned; - /* According to Nvidia all timer overrides are bogus unless HPET - is enabled. */ - if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { - if (!warned && acpi_table_parse(ACPI_SIG_HPET, - nvidia_hpet_check)) { - warned = 1; - acpi_skip_timer_override = 1; - printk(KERN_INFO "Nvidia board " - "detected. Ignoring ACPI " - "timer override.\n"); - printk(KERN_INFO "If you got timer trouble " - "try acpi_use_timer_override\n"); - - } - } -#endif - if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) { - timer_over_8254 = 0; - printk(KERN_INFO "ATI board detected. Disabling timer routing " - "over 8254.\n"); - } - return 0; -} - -void __init check_acpi_pci(void) -{ - int num, slot, func; - - /* Assume the machine supports type 1. If not it will - always read ffffffff and should not have any side effect. - Actually a few buggy systems can machine check. 
Allow the user - to disable it by command line option at least -AK */ - if (!early_pci_allowed()) - return; - - /* Poor man's PCI discovery */ - for (num = 0; num < 32; num++) { - for (slot = 0; slot < 32; slot++) { - for (func = 0; func < 8; func++) { - u32 class; - u32 vendor; - class = read_pci_config(num, slot, func, - PCI_CLASS_REVISION); - if (class == 0xffffffff) - break; - - if ((class >> 16) != PCI_CLASS_BRIDGE_PCI) - continue; - - vendor = read_pci_config(num, slot, func, - PCI_VENDOR_ID); - - if (check_bridge(vendor & 0xffff, vendor >> 16)) - return; - } - - } - } -} diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c index b54fded..2ed0a4c 100644 --- a/arch/x86/kernel/acpi/processor.c +++ b/arch/x86/kernel/acpi/processor.c @@ -63,7 +63,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c) void arch_acpi_processor_init_pdc(struct acpi_processor *pr) { unsigned int cpu = pr->id; - struct cpuinfo_x86 *c = cpu_data + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); pr->pdc = NULL; if (c->x86_vendor == X86_VENDOR_INTEL) diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index f22ba85..a97313b 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -11,7 +11,7 @@ # # If physical address of wakeup_code is 0x12345, BIOS should call us with # cs = 0x1234, eip = 0x05 -# +# #define BEEP \ inb $97, %al; \ @@ -52,7 +52,6 @@ wakeup_code: BEEP 1: mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board - movw $0x0e00 + 'S', %fs:(0x12) pushl $0 # Kill any dangerous flags popfl @@ -90,9 +89,6 @@ wakeup_code: # make sure %cr4 is set correctly (features, etc) movl real_save_cr4 - wakeup_code, %eax movl %eax, %cr4 - movw $0xb800, %ax - movw %ax,%fs - movw $0x0e00 + 'i', %fs:(0x12) # need a gdt -- use lgdtl to force 32-bit operands, in case # the GDT is located past 16 megabytes. @@ -102,8 +98,6 @@ wakeup_code: movl %eax, %cr0 jmp 1f 1: - movw $0x0e00 + 'n', %fs:(0x14) - movl real_magic - wakeup_code, %eax cmpl $0x12345678, %eax jne bogus_real_magic @@ -122,13 +116,11 @@ real_save_cr4: .long 0 real_magic: .long 0 video_mode: .long 0 realmode_flags: .long 0 -beep_flags: .long 0 real_efer_save_restore: .long 0 real_save_efer_edx: .long 0 real_save_efer_eax: .long 0 bogus_real_magic: - movw $0x0e00 + 'B', %fs:(0x12) jmp bogus_real_magic /* This code uses an extended set of video mode numbers. These include: @@ -194,7 +186,6 @@ wakeup_pmode_return: movw %ax, %es movw %ax, %fs movw %ax, %gs - movw $0x0e00 + 'u', 0xb8016 # reload the gdt, as we need the full 32 bit address lgdt saved_gdt @@ -218,7 +209,6 @@ wakeup_pmode_return: jmp *%eax bogus_magic: - movw $0x0e00 + 'B', 0xb8018 jmp bogus_magic diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 8b4357e..55608ec 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -41,7 +41,6 @@ wakeup_code: # Running in *copy* of this code, somewhere in low 1MB. 
- movb $0xa1, %al ; outb %al, $0x80 cli cld # setup data segment @@ -65,11 +64,6 @@ wakeup_code: cmpl $0x12345678, %eax jne bogus_real_magic - call verify_cpu # Verify the cpu supports long - # mode - testl %eax, %eax - jnz no_longmode - testl $1, realmode_flags - wakeup_code jz 1f lcall $0xc000,$3 @@ -84,12 +78,6 @@ wakeup_code: call mode_set 1: - movw $0xb800, %ax - movw %ax,%fs - movw $0x0e00 + 'L', %fs:(0x10) - - movb $0xa2, %al ; outb %al, $0x80 - mov %ds, %ax # Find 32bit wakeup_code addr movzx %ax, %esi # (Convert %ds:gdt to a liner ptr) shll $4, %esi @@ -117,14 +105,10 @@ wakeup_32_vector: .code32 wakeup_32: # Running in this code, but at low address; paging is not yet turned on. - movb $0xa5, %al ; outb %al, $0x80 movl $__KERNEL_DS, %eax movl %eax, %ds - movw $0x0e00 + 'i', %ds:(0xb8012) - movb $0xa8, %al ; outb %al, $0x80; - /* * Prepare for entering 64bits mode */ @@ -200,16 +184,11 @@ wakeup_long64: */ lgdt cpu_gdt_descr - movw $0x0e00 + 'n', %ds:(0xb8014) - movb $0xa9, %al ; outb %al, $0x80 - movq saved_magic, %rax movq $0x123456789abcdef0, %rdx cmpq %rdx, %rax jne bogus_64_magic - movw $0x0e00 + 'u', %ds:(0xb8016) - nop nop movw $__KERNEL_DS, %ax @@ -220,13 +199,11 @@ wakeup_long64: movw %ax, %gs movq saved_rsp, %rsp - movw $0x0e00 + 'x', %ds:(0xb8018) movq saved_rbx, %rbx movq saved_rdi, %rdi movq saved_rsi, %rsi movq saved_rbp, %rbp - movw $0x0e00 + '!', %ds:(0xb801a) movq saved_rip, %rax jmp *%rax @@ -256,21 +233,12 @@ realmode_flags: .quad 0 .code16 bogus_real_magic: - movb $0xba,%al ; outb %al,$0x80 jmp bogus_real_magic .code64 bogus_64_magic: - movb $0xb3,%al ; outb %al,$0x80 jmp bogus_64_magic -.code16 -no_longmode: - movb $0xbc,%al ; outb %al,$0x80 - jmp no_longmode - -#include "../verify_cpu_64.S" - /* This code uses an extended set of video mode numbers. These include: * Aliases for standard modes * NORMAL_VGA (-1) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 3bd2688..d6405e0 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -357,14 +357,14 @@ void alternatives_smp_switch(int smp) if (smp) { printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); - clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); + clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); list_for_each_entry(mod, &smp_alt_modules, next) alternatives_smp_lock(mod->locks, mod->locks_end, mod->text, mod->text_end); } else { printk(KERN_INFO "SMP alternatives: switching to UP code\n"); set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); - set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); + set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); list_for_each_entry(mod, &smp_alt_modules, next) alternatives_smp_unlock(mod->locks, mod->locks_end, mod->text, mod->text_end); @@ -432,7 +432,7 @@ void __init alternative_instructions(void) if (1 == num_possible_cpus()) { printk(KERN_INFO "SMP alternatives: switching to UP code\n"); set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); - set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); + set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); alternatives_smp_unlock(__smp_locks, __smp_locks_end, _text, _etext); } diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 793341f..08b07c1 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -947,7 +947,7 @@ void __devinit setup_local_APIC(void) * Set up LVT0, LVT1: * * set up through-local-APIC on the BP's LINT0. 
This is not - * strictly necessery in pure symmetric-IO mode, but sometimes + * strictly necessary in pure symmetric-IO mode, but sometimes * we delegate interrupts to the 8259A. */ /* @@ -998,7 +998,7 @@ void __devinit setup_local_APIC(void) } else { if (esr_disable) /* - * Something untraceble is creating bad interrupts on + * Something untraceable is creating bad interrupts on * secondary quads ... for the moment, just leave the * ESR disabled - we can't do anything useful with the * errors anyway - mbligh diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 32f2365..17089a0 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -57,7 +57,7 @@ * screen-blanking and gpm (Stephen Rothwell); Linux 1.99.4 * 1.2a:Simple change to stop mysterious bug reports with SMP also added * levels to the printk calls. APM is not defined for SMP machines. - * The new replacment for it is, but Linux doesn't yet support this. + * The new replacement for it is, but Linux doesn't yet support this. * Alan Cox Linux 2.1.55 * 1.3: Set up a valid data descriptor 0x40 for buggy BIOS's * 1.4: Upgraded to support APM 1.2. Integrated ThinkPad suspend patch by diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 5f8af87..1ff88c7 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -266,7 +266,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_HT /* * On a AMD multi core setup the lower bits of the APIC id - * distingush the cores. + * distinguish the cores. */ if (c->x86_max_cores > 1) { int cpu = smp_processor_id(); diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 473eac8..9681fa1 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -53,7 +53,7 @@ static u32 __cpuinit ramtop(void) /* 16388 */ continue; /* * Don't MCR over reserved space. Ignore the ISA hole - * we frob around that catastrophy already + * we frob around that catastrophe already */ if (e820.map[i].type == E820_RESERVED) @@ -287,7 +287,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) c->x86_capability[5] = cpuid_edx(0xC0000001); } - /* Cyrix III family needs CX8 & PGE explicity enabled. */ + /* Cyrix III family needs CX8 & PGE explicitly enabled. */ if (c->x86_model >=6 && c->x86_model <= 9) { rdmsr (MSR_VIA_FCR, lo, hi); lo |= (1<<1 | 1<<7); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d506201..e2fcf20 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -207,7 +207,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) static int __init x86_fxsr_setup(char * s) { - /* Tell all the other CPU's to not use it... */ + /* Tell all the other CPUs to not use it... */ disable_x86_fxsr = 1; /* diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig_32 index d8c6f13..d8c6f13 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/arch/x86/kernel/cpu/cpufreq/Kconfig_32 diff --git a/arch/x86/kernel/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig_64 index a3fd519..9c9699f 100644 --- a/arch/x86/kernel/cpufreq/Kconfig +++ b/arch/x86/kernel/cpu/cpufreq/Kconfig_64 @@ -19,7 +19,7 @@ config X86_POWERNOW_K8 To compile this driver as a module, choose M here: the module will be called powernow-k8. - For details, take a look at <file:Documentation/cpu-freq/>. + For details, take a look at <file:Documentation/cpu-freq/>. If in doubt, say N. 
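A recurring change in the cpufreq drivers above and below replaces direct indexing of the flat cpu_data array (cpu_data[cpu], or bare cpu_data for CPU 0) with the cpu_data(cpu) accessor, so callers stop depending on how the per-CPU cpuinfo storage is laid out. A minimal user-space sketch of that accessor pattern, using hypothetical names (my_cpuinfo, my_cpu_data, MY_NR_CPUS) rather than the kernel's own definitions:

/* Simplified model of the cpu_data[cpu] -> cpu_data(cpu) conversion.
 * Hypothetical names; the kernel's real accessor resolves to per-CPU data. */
#include <stdio.h>

#define MY_NR_CPUS 4

struct my_cpuinfo {
	int vendor;
	int family;
};

/* Backing storage stays private to this file... */
static struct my_cpuinfo my_cpu_info[MY_NR_CPUS];

/* ...and every caller goes through the accessor, mirroring cpu_data(cpu). */
#define my_cpu_data(cpu) (my_cpu_info[(cpu)])

int main(void)
{
	struct my_cpuinfo *c = &my_cpu_data(0);	/* analogous to &cpu_data(0) */

	c->family = 6;
	printf("cpu0 family: %d\n", my_cpu_data(0).family);
	return 0;
}

Because callers only see the accessor, changing the backing store from a flat array to per-CPU memory touches the macro definition rather than every driver, which is what the conversions in this series rely on.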
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 2ca43ba..fea0af0 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -77,7 +77,7 @@ static unsigned int acpi_pstate_strict; static int check_est_cpu(unsigned int cpuid) { - struct cpuinfo_x86 *cpu = &cpu_data[cpuid]; + struct cpuinfo_x86 *cpu = &cpu_data(cpuid); if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST)) @@ -560,7 +560,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) unsigned int cpu = policy->cpu; struct acpi_cpufreq_data *data; unsigned int result = 0; - struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; + struct cpuinfo_x86 *c = &cpu_data(policy->cpu); struct acpi_processor_performance *perf; dprintk("acpi_cpufreq_cpu_init\n"); diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index 32f0bda..f03e9153 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -260,7 +260,7 @@ static int nforce2_target(struct cpufreq_policy *policy, freqs.old = nforce2_get(policy->cpu); freqs.new = target_fsb * fid * 100; - freqs.cpu = 0; /* Only one CPU on nForce2 plattforms */ + freqs.cpu = 0; /* Only one CPU on nForce2 platforms */ if (freqs.old == freqs.new) return 0; diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c index c11baaf..326a4c8 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c @@ -305,7 +305,7 @@ static struct cpufreq_driver eps_driver = { static int __init eps_init(void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); /* This driver will work only on Centaur C7 processors with * Enhanced SpeedStep/PowerSaver registers */ diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c index 1e7ae7d..94619c2 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c @@ -199,7 +199,7 @@ static int elanfreq_target (struct cpufreq_policy *policy, static int elanfreq_cpu_init(struct cpufreq_policy *policy) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); unsigned int i; int result; @@ -280,7 +280,7 @@ static struct cpufreq_driver elanfreq_driver = { static int __init elanfreq_init(void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); /* Test if we have the right hardware */ if ((c->x86_vendor != X86_VENDOR_AMD) || diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c index ed2bda1..2ed7db2 100644 --- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c +++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c @@ -12,12 +12,12 @@ * of any nature resulting due to the use of this software. This * software is provided AS-IS with no warranties. * - * Theoritical note: + * Theoretical note: * * (see Geode(tm) CS5530 manual (rev.4.1) page.56) * * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0 - * are based on Suspend Moduration. + * are based on Suspend Modulation. * * Suspend Modulation works by asserting and de-asserting the SUSP# pin * to CPU(GX1/GXLV) for configurable durations. 
When asserting SUSP# @@ -101,11 +101,11 @@ /* SUSCFG bits */ #define SUSMOD (1<<0) /* enable/disable suspend modulation */ -/* the belows support only with cs5530 (after rev.1.2)/cs5530A */ +/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */ #define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */ /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ #define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ -/* the belows support only with cs5530A */ +/* the below is supported only with cs5530A */ #define PWRSVE_ISA (1<<3) /* stop ISA clock */ #define PWRSVE (1<<4) /* active idle */ diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index 5045f5d..749d00c 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c @@ -780,7 +780,7 @@ static int longhaul_setup_southbridge(void) static int __init longhaul_cpu_init(struct cpufreq_policy *policy) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); char *cpuname=NULL; int ret; u32 lo, hi; @@ -959,7 +959,7 @@ static struct cpufreq_driver longhaul_driver = { static int __init longhaul_init(void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) return -ENODEV; diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c index b268951..af4a867 100644 --- a/arch/x86/kernel/cpu/cpufreq/longrun.c +++ b/arch/x86/kernel/cpu/cpufreq/longrun.c @@ -172,7 +172,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, u32 save_lo, save_hi; u32 eax, ebx, ecx, edx; u32 try_hi; - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); if (!low_freq || !high_freq) return -EINVAL; @@ -298,7 +298,7 @@ static struct cpufreq_driver longrun_driver = { */ static int __init longrun_init(void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); if (c->x86_vendor != X86_VENDOR_TRANSMETA || !cpu_has(c, X86_FEATURE_LONGRUN)) diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 793eae8..14791ec 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -195,7 +195,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) { - struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; + struct cpuinfo_x86 *c = &cpu_data(policy->cpu); int cpuid = 0; unsigned int i; @@ -279,7 +279,7 @@ static struct cpufreq_driver p4clockmod_driver = { static int __init cpufreq_p4_init(void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); int ret; /* diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c index 6d02853..eb9b62b 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c @@ -1,6 +1,6 @@ /* * This file was based upon code in Powertweak Linux (http://powertweak.sf.net) - * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. + * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. * * Licensed under the terms of the GNU GPL License version 2. 
* @@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = { */ static int __init powernow_k6_init(void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) || ((c->x86_model != 12) && (c->x86_model != 13))) diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index f3686a5..b5a9863 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -114,7 +114,7 @@ static int check_fsb(unsigned int fsbspeed) static int check_powernow(void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); unsigned int maxei, eax, ebx, ecx, edx; if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) { diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c06ac68..9c36a53 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -168,7 +168,7 @@ static void count_off_irt(struct powernow_k8_data *data) return; } -/* the voltage stabalization time */ +/* the voltage stabilization time */ static void count_off_vst(struct powernow_k8_data *data) { udelay(data->vstable * VST_UNITS_20US); diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index b06c812..7c4f6e0 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -148,10 +148,10 @@ struct powernow_k8_data { #define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */ #define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */ -#define VST_UNITS_20US 20 /* Voltage Stabalization Time is in units of 20us */ +#define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */ /* - * Most values of interest are enocoded in a single field of the _PSS + * Most values of interest are encoded in a single field of the _PSS * entries: the "control" value. 
*/ diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c index d9f3e90..42da9bd 100644 --- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c +++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c @@ -102,7 +102,7 @@ static int sc520_freq_target (struct cpufreq_policy *policy, static int sc520_freq_cpu_init(struct cpufreq_policy *policy) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); int result; /* capability check */ @@ -151,7 +151,7 @@ static struct cpufreq_driver sc520_freq_driver = { static int __init sc520_freq_init(void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); int err; /* Test if we have the right hardware */ diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 811d474..3031f11 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -230,7 +230,7 @@ static struct cpu_model models[] = static int centrino_cpu_init_table(struct cpufreq_policy *policy) { - struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu]; + struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); struct cpu_model *model; for(model = models; model->cpu_id != NULL; model++) @@ -340,7 +340,7 @@ static unsigned int get_cur_freq(unsigned int cpu) static int centrino_cpu_init(struct cpufreq_policy *policy) { - struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu]; + struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); unsigned freq; unsigned l, h; int ret; @@ -612,7 +612,7 @@ static struct cpufreq_driver centrino_driver = { */ static int __init centrino_init(void) { - struct cpuinfo_x86 *cpu = cpu_data; + struct cpuinfo_x86 *cpu = &cpu_data(0); if (!cpu_has(cpu, X86_FEATURE_EST)) return -ENODEV; diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index b1acc8c..76c3ab0 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency); unsigned int speedstep_detect_processor (void) { - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(0); u32 ebx, msr_lo, msr_hi; dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 122d2d7..88d66fb 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -93,7 +93,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) local_irq_save(flags); ccr3 = getCx86(CX86_CCR3); - setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ + setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ ccr5 = getCx86(CX86_CCR5); if (ccr5 & 2) setCx86(CX86_CCR5, ccr5 & 0xfd); /* reset SLOP */ @@ -115,9 +115,9 @@ static void __cpuinit set_cx86_reorder(void) printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n"); ccr3 = getCx86(CX86_CCR3); - setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ + setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ - /* Load/Store Serialize to mem access disable (=reorder it) */ + /* Load/Store Serialize to mem access disable (=reorder it) */ setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); /* set load/store serialize from 1GB to 4GB */ ccr3 |= 0xe0; @@ -146,7 +146,7 @@ static void __cpuinit set_cx86_inc(void) printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n"); ccr3 = getCx86(CX86_CCR3); - setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); 
/* enable MAPEN */ + setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ /* PCR1 -- Performance Control */ /* Incrementor on, whatever that is */ setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02); @@ -256,7 +256,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) u32 vendor, device; /* It isn't really a PCI quirk directly, but the cure is the same. The MediaGX has deep magic SMM stuff that handles the - SB emulation. It thows away the fifo on disable_dma() which + SB emulation. It throws away the fifo on disable_dma() which is wrong and ruins the audio. Bug2: VSA1 has a wrap bug so that using maximum sized DMA diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 1826395..9921b01 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -295,7 +295,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; #ifdef CONFIG_X86_HT - unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); + unsigned int cpu = c->cpu_index; #endif if (c->cpuid_level > 3) { @@ -417,14 +417,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) if (new_l2) { l2 = new_l2; #ifdef CONFIG_X86_HT - cpu_llc_id[cpu] = l2_id; + per_cpu(cpu_llc_id, cpu) = l2_id; #endif } if (new_l3) { l3 = new_l3; #ifdef CONFIG_X86_HT - cpu_llc_id[cpu] = l3_id; + per_cpu(cpu_llc_id, cpu) = l3_id; #endif } @@ -459,7 +459,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) struct _cpuid4_info *this_leaf, *sibling_leaf; unsigned long num_threads_sharing; int index_msb, i; - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(cpu); this_leaf = CPUID4_INFO_IDX(cpu, index); num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; @@ -470,8 +470,8 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) index_msb = get_count_order(num_threads_sharing); for_each_online_cpu(i) { - if (c[i].apicid >> index_msb == - c[cpu].apicid >> index_msb) { + if (cpu_data(i).apicid >> index_msb == + c->apicid >> index_msb) { cpu_set(i, this_leaf->shared_cpu_map); if (i != cpu && cpuid4_info[i]) { sibling_leaf = CPUID4_INFO_IDX(i, index); @@ -499,6 +499,11 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { static void free_cache_attributes(unsigned int cpu) { + int i; + + for (i = 0; i < num_cache_leaves; i++) + cache_remove_shared_cpu_map(cpu, i); + kfree(cpuid4_info[cpu]); cpuid4_info[cpu] = NULL; } @@ -506,8 +511,8 @@ static void free_cache_attributes(unsigned int cpu) static int __cpuinit detect_cache_attributes(unsigned int cpu) { struct _cpuid4_info *this_leaf; - unsigned long j; - int retval; + unsigned long j; + int retval; cpumask_t oldmask; if (num_cache_leaves == 0) @@ -524,19 +529,26 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) goto out; /* Do cpuid and store the results */ - retval = 0; for (j = 0; j < num_cache_leaves; j++) { this_leaf = CPUID4_INFO_IDX(cpu, j); retval = cpuid4_cache_lookup(j, this_leaf); - if (unlikely(retval < 0)) + if (unlikely(retval < 0)) { + int i; + + for (i = 0; i < j; i++) + cache_remove_shared_cpu_map(cpu, i); break; + } cache_shared_cpu_map_setup(cpu, j); } set_cpus_allowed(current, oldmask); out: - if (retval) - free_cache_attributes(cpu); + if (retval) { + kfree(cpuid4_info[cpu]); + cpuid4_info[cpu] = NULL; + } + 
return retval; } @@ -669,7 +681,7 @@ static struct kobj_type ktype_percpu_entry = { .sysfs_ops = &sysfs_ops, }; -static void cpuid4_cache_sysfs_exit(unsigned int cpu) +static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) { kfree(cache_kobject[cpu]); kfree(index_kobject[cpu]); @@ -680,13 +692,14 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu) static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) { + int err; if (num_cache_leaves == 0) return -ENOENT; - detect_cache_attributes(cpu); - if (cpuid4_info[cpu] == NULL) - return -ENOENT; + err = detect_cache_attributes(cpu); + if (err) + return err; /* Allocate all required memory */ cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL); @@ -705,13 +718,15 @@ err_out: return -ENOMEM; } +static cpumask_t cache_dev_map = CPU_MASK_NONE; + /* Add/Remove cache interface for CPU device */ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) { unsigned int cpu = sys_dev->id; unsigned long i, j; struct _index_kobject *this_object; - int retval = 0; + int retval; retval = cpuid4_cache_sysfs_init(cpu); if (unlikely(retval < 0)) @@ -721,6 +736,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) kobject_set_name(cache_kobject[cpu], "%s", "cache"); cache_kobject[cpu]->ktype = &ktype_percpu_entry; retval = kobject_register(cache_kobject[cpu]); + if (retval < 0) { + cpuid4_cache_sysfs_exit(cpu); + return retval; + } for (i = 0; i < num_cache_leaves; i++) { this_object = INDEX_KOBJECT_PTR(cpu,i); @@ -740,6 +759,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) break; } } + if (!retval) + cpu_set(cpu, cache_dev_map); + return retval; } @@ -750,13 +772,14 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) if (cpuid4_info[cpu] == NULL) return; - for (i = 0; i < num_cache_leaves; i++) { - cache_remove_shared_cpu_map(cpu, i); + if (!cpu_isset(cpu, cache_dev_map)) + return; + cpu_clear(cpu, cache_dev_map); + + for (i = 0; i < num_cache_leaves; i++) kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); - } kobject_unregister(cache_kobject[cpu]); cpuid4_cache_sysfs_exit(cpu); - return; } static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, @@ -781,7 +804,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { - .notifier_call = cacheinfo_cpu_callback, + .notifier_call = cacheinfo_cpu_callback, }; static int __cpuinit cache_sysfs_init(void) @@ -791,14 +814,15 @@ static int __cpuinit cache_sysfs_init(void) if (num_cache_leaves == 0) return 0; - register_hotcpu_notifier(&cacheinfo_cpu_notifier); - for_each_online_cpu(i) { - struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i); + int err; + struct sys_device *sys_dev = get_cpu_sysdev(i); - cache_add_dev(sys_dev); + err = cache_add_dev(sys_dev); + if (err) + return err; } - + register_hotcpu_notifier(&cacheinfo_cpu_notifier); return 0; } diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 494d320..24885be 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -131,17 +131,19 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, { unsigned int cpu = (unsigned long)hcpu; struct sys_device *sys_dev; - int err; + int err = 0; sys_dev = get_cpu_sysdev(cpu); switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: 
mutex_lock(&therm_cpu_lock); err = thermal_throttle_add_dev(sys_dev); mutex_unlock(&therm_cpu_lock); WARN_ON(err); break; + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: mutex_lock(&therm_cpu_lock); @@ -149,7 +151,7 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, mutex_unlock(&therm_cpu_lock); break; } - return NOTIFY_OK; + return err ? NOTIFY_BAD : NOTIFY_OK; } static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index 2287d48..9964be3 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c @@ -147,10 +147,10 @@ static void prepare_set(void) write_cr0(cr0); wbinvd(); - /* Cyrix ARRs - everything else were excluded at the top */ + /* Cyrix ARRs - everything else was excluded at the top */ ccr3 = getCx86(CX86_CCR3); - /* Cyrix ARRs - everything else were excluded at the top */ + /* Cyrix ARRs - everything else was excluded at the top */ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); } diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 56f64e3..992f08d 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -182,7 +182,7 @@ static inline void k8_enable_fixed_iorrs(void) /** * Checks and updates an fixed-range MTRR if it differs from the value it - * should have. If K8 extenstions are wanted, update the K8 SYSCFG MSR also. + * should have. If K8 extentions are wanted, update the K8 SYSCFG MSR also. * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information * \param msr MSR address of the MTTR which should be checked and updated * \param changed pointer which indicates whether the MTRR needed to be changed diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 5e4be30..9abbdf7 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -748,7 +748,7 @@ static int __init mtrr_init_finialize(void) if (use_intel()) mtrr_state_warn(); else { - /* The CPUs haven't MTRR and seemes not support SMP. They have + /* The CPUs haven't MTRR and seem to not support SMP. They have * specific drivers, we use a tricky method to support * suspend/resume for them. * TBD: is there any system with such CPU which supports diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 54cdbf1..c02541e 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -120,7 +120,9 @@ int reserve_perfctr_nmi(unsigned int msr) unsigned int counter; counter = nmi_perfctr_msr_to_bit(msr); - BUG_ON(counter > NMI_MAX_COUNTER_BITS); + /* register not managed by the allocator? */ + if (counter > NMI_MAX_COUNTER_BITS) + return 1; if (!test_and_set_bit(counter, perfctr_nmi_owner)) return 1; @@ -132,7 +134,9 @@ void release_perfctr_nmi(unsigned int msr) unsigned int counter; counter = nmi_perfctr_msr_to_bit(msr); - BUG_ON(counter > NMI_MAX_COUNTER_BITS); + /* register not managed by the allocator? */ + if (counter > NMI_MAX_COUNTER_BITS) + return; clear_bit(counter, perfctr_nmi_owner); } @@ -142,7 +146,9 @@ int reserve_evntsel_nmi(unsigned int msr) unsigned int counter; counter = nmi_evntsel_msr_to_bit(msr); - BUG_ON(counter > NMI_MAX_COUNTER_BITS); + /* register not managed by the allocator? 
*/ + if (counter > NMI_MAX_COUNTER_BITS) + return 1; if (!test_and_set_bit(counter, evntsel_nmi_owner)) return 1; @@ -154,7 +160,9 @@ void release_evntsel_nmi(unsigned int msr) unsigned int counter; counter = nmi_evntsel_msr_to_bit(msr); - BUG_ON(counter > NMI_MAX_COUNTER_BITS); + /* register not managed by the allocator? */ + if (counter > NMI_MAX_COUNTER_BITS) + return; clear_bit(counter, evntsel_nmi_owner); } diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 879a0f7..2d42b41 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -85,12 +85,13 @@ static int show_cpuinfo(struct seq_file *m, void *v) /* nothing */ }; struct cpuinfo_x86 *c = v; - int i, n = c - cpu_data; + int i, n = 0; int fpu_exception; #ifdef CONFIG_SMP if (!cpu_online(n)) return 0; + n = c->cpu_index; #endif seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n" @@ -175,11 +176,15 @@ static int show_cpuinfo(struct seq_file *m, void *v) static void *c_start(struct seq_file *m, loff_t *pos) { - return *pos < NR_CPUS ? cpu_data + *pos : NULL; + if (*pos == 0) /* just in case, cpu 0 is not the first */ + *pos = first_cpu(cpu_possible_map); + if ((*pos) < NR_CPUS && cpu_possible(*pos)) + return &cpu_data(*pos); + return NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { - ++*pos; + *pos = next_cpu(*pos, cpu_possible_map); return c_start(m, pos); } static void c_stop(struct seq_file *m, void *v) diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 70dcf91..05c9936 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -114,7 +114,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, static int cpuid_open(struct inode *inode, struct file *file) { unsigned int cpu = iminor(file->f_path.dentry->d_inode); - struct cpuinfo_x86 *c = &(cpu_data)[cpu]; + struct cpuinfo_x86 *c = &cpu_data(cpu); if (cpu >= NR_CPUS || !cpu_online(cpu)) return -ENXIO; /* No such CPU */ @@ -134,15 +134,18 @@ static const struct file_operations cpuid_fops = { .open = cpuid_open, }; -static int __cpuinit cpuid_device_create(int i) +static __cpuinit int cpuid_device_create(int cpu) { - int err = 0; struct device *dev; - dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i); - if (IS_ERR(dev)) - err = PTR_ERR(dev); - return err; + dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), + "cpu%d", cpu); + return IS_ERR(dev) ? PTR_ERR(dev) : 0; +} + +static void cpuid_device_destroy(int cpu) +{ + device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); } static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, @@ -150,18 +153,21 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; + int err = 0; switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - cpuid_device_create(cpu); + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + err = cpuid_device_create(cpu); break; + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: - device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); + cpuid_device_destroy(cpu); break; } - return NOTIFY_OK; + return err ? 
NOTIFY_BAD : NOTIFY_OK; } static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier = @@ -198,7 +204,7 @@ static int __init cpuid_init(void) out_class: i = 0; for_each_online_cpu(i) { - device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i)); + cpuid_device_destroy(i); } class_destroy(cpuid_class); out_chrdev: @@ -212,7 +218,7 @@ static void __exit cpuid_exit(void) int cpu = 0; for_each_online_cpu(cpu) - device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); + cpuid_device_destroy(cpu); class_destroy(cpuid_class); unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c index 32e75d0..72d0c56 100644 --- a/arch/x86/kernel/crash_dump_32.c +++ b/arch/x86/kernel/crash_dump_32.c @@ -47,6 +47,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, if (!kdump_buf_page) { printk(KERN_WARNING "Kdump: Kdump buffer page not" " allocated\n"); + kunmap_atomic(vaddr, KM_PTE0); return -EFAULT; } copy_page(kdump_buf_page, vaddr); diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c index 3c86b97..58fd54e 100644 --- a/arch/x86/kernel/e820_32.c +++ b/arch/x86/kernel/e820_32.c @@ -288,7 +288,8 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat request_resource(res, code_resource); request_resource(res, data_resource); #ifdef CONFIG_KEXEC - request_resource(res, &crashk_res); + if (crashk_res.start != crashk_res.end) + request_resource(res, &crashk_res); #endif } } @@ -705,7 +706,7 @@ void __init e820_register_memory(void) int i; /* - * Search for the bigest gap in the low 32 bits of the e820 + * Search for the biggest gap in the low 32 bits of the e820 * memory space. */ last = 0x100000000ull; diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index e422b81..5761686 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -226,7 +226,8 @@ void __init e820_reserve_resources(void) request_resource(res, &code_resource); request_resource(res, &data_resource); #ifdef CONFIG_KEXEC - request_resource(res, &crashk_res); + if (crashk_res.start != crashk_res.end) + request_resource(res, &crashk_res); #endif } } diff --git a/arch/x86/kernel/early-quirks_64.c b/arch/x86/kernel/early-quirks.c index 13aa4fd..dc34acb 100644 --- a/arch/x86/kernel/early-quirks_64.c +++ b/arch/x86/kernel/early-quirks.c @@ -13,9 +13,13 @@ #include <linux/acpi.h> #include <linux/pci_ids.h> #include <asm/pci-direct.h> -#include <asm/proto.h> -#include <asm/iommu.h> #include <asm/dma.h> +#include <asm/io_apic.h> +#include <asm/apic.h> + +#ifdef CONFIG_IOMMU +#include <asm/iommu.h> +#endif static void __init via_bugs(void) { @@ -23,7 +27,8 @@ static void __init via_bugs(void) if ((end_pfn > MAX_DMA32_PFN || force_iommu) && !iommu_aperture_allowed) { printk(KERN_INFO - "Looks like a VIA chipset. Disabling IOMMU. Override with iommu=allowed\n"); + "Looks like a VIA chipset. Disabling IOMMU." + " Override with iommu=allowed\n"); iommu_aperture_disabled = 1; } #endif @@ -40,6 +45,7 @@ static int __init nvidia_hpet_check(struct acpi_table_header *header) static void __init nvidia_bugs(void) { #ifdef CONFIG_ACPI +#ifdef CONFIG_X86_IO_APIC /* * All timer overrides on Nvidia are * wrong unless HPET is enabled. @@ -59,17 +65,20 @@ static void __init nvidia_bugs(void) "try acpi_use_timer_override\n"); } #endif +#endif /* RED-PEN skip them on mptables too? 
*/ } static void __init ati_bugs(void) { +#ifdef CONFIG_X86_IO_APIC if (timer_over_8254 == 1) { timer_over_8254 = 0; printk(KERN_INFO - "ATI board detected. Disabling timer routing over 8254.\n"); + "ATI board detected. Disabling timer routing over 8254.\n"); } +#endif } struct chipset { @@ -104,7 +113,7 @@ void __init early_quirks(void) if (class == 0xffffffff) break; - if ((class >> 16) != PCI_CLASS_BRIDGE_PCI) + if ((class >> 16) != PCI_CLASS_BRIDGE_PCI) continue; vendor = read_pci_config(num, slot, func, diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 4ae03e3..ce703e2 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -24,10 +24,19 @@ #include <acpi/acpi_bus.h> #endif -/* which logical CPU number maps to which CPU (physical APIC ID) */ -u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly +/* + * which logical CPU number maps to which CPU (physical APIC ID) + * + * The following static array is used during kernel startup + * and the x86_cpu_to_apicid_ptr contains the address of the + * array during this time. Is it zeroed when the per_cpu + * data area is removed. + */ +u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; -EXPORT_SYMBOL(x86_cpu_to_apicid); +void *x86_cpu_to_apicid_ptr; +DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID; +EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); struct genapic __read_mostly *genapic = &apic_flat; diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 91c7526..07352b7 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -172,7 +172,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) */ cpu = first_cpu(cpumask); if ((unsigned)cpu < NR_CPUS) - return x86_cpu_to_apicid[cpu]; + return per_cpu(x86_cpu_to_apicid, cpu); else return BAD_APICID; } diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index a7eee0a..6b34693 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -58,7 +58,7 @@ void __init x86_64_start_kernel(char * real_mode_data) for (i = 0; i < IDT_ENTRIES; i++) set_intr_gate(i, early_idt_handler); - asm volatile("lidt %0" :: "m" (idt_descr)); + load_idt((const struct desc_ptr *)&idt_descr); early_printk("Kernel alive\n"); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index f836707..53303f2 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -69,12 +69,15 @@ static inline void hpet_clear_mapping(void) * HPET command line enable / disable */ static int boot_hpet_disable; +int hpet_force_user; static int __init hpet_setup(char* str) { if (str) { if (!strncmp("disable", str, 7)) boot_hpet_disable = 1; + if (!strncmp("force", str, 5)) + hpet_force_user = 1; } return 1; } @@ -350,7 +353,7 @@ static int hpet_clocksource_register(void) * * hpet period is in femto seconds per cycle * so we need to convert this to ns/cyc units - * aproximated by mult/2^shift + * approximated by mult/2^shift * * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift * fsec/cyc * 1ns/1000000fsec * 2^shift = mult diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 5cc8841..a42c807 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -86,7 +86,7 @@ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) * On UP the PIT can serve all of the possible timer functions. On SMP systems * it can be solely used for the global tick. 
* - * The profiling and update capabilites are switched off once the local apic is + * The profiling and update capabilities are switched off once the local apic is * registered. This mechanism replaces the previous #ifdef LOCAL_APIC - * !using_apic_timer decisions in do_timer_interrupt_hook() */ diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c index d34a10c..f634fc7 100644 --- a/arch/x86/kernel/i8259_32.c +++ b/arch/x86/kernel/i8259_32.c @@ -403,7 +403,8 @@ void __init native_init_IRQ(void) int vector = FIRST_EXTERNAL_VECTOR + i; if (i >= NR_IRQS) break; - if (vector != SYSCALL_VECTOR) + /* SYSCALL_VECTOR was reserved in trap_init. */ + if (!test_bit(vector, used_vectors)) set_intr_gate(vector, interrupt[i]); } diff --git a/arch/x86/kernel/init_task_32.c b/arch/x86/kernel/init_task.c index d26fc06..468c9c4 100644 --- a/arch/x86/kernel/init_task_32.c +++ b/arch/x86/kernel/init_task.c @@ -15,7 +15,6 @@ static struct files_struct init_files = INIT_FILES; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); - EXPORT_SYMBOL(init_mm); /* @@ -25,7 +24,7 @@ EXPORT_SYMBOL(init_mm); * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. */ -union thread_union init_thread_union +union thread_union init_thread_union __attribute__((__section__(".data.init_task"))) = { INIT_THREAD_INFO(init_task) }; @@ -35,12 +34,14 @@ union thread_union init_thread_union * All other task structs will be allocated on slabs in fork.c */ struct task_struct init_task = INIT_TASK(init_task); - EXPORT_SYMBOL(init_task); /* * per-CPU TSS segments. Threads are completely 'soft' on Linux, - * no more per-task TSS's. - */ + * no more per-task TSS's. The TSS size is kept cacheline-aligned + * so they are allowed to end up in the .data.cacheline_aligned + * section. Since TSS's are completely CPU-local, we want them + * on exact cacheline boundaries, to eliminate cacheline ping-pong. + */ DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; diff --git a/arch/x86/kernel/init_task_64.c b/arch/x86/kernel/init_task_64.c deleted file mode 100644 index 4ff33d4..0000000 --- a/arch/x86/kernel/init_task_64.c +++ /dev/null @@ -1,54 +0,0 @@ -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/init.h> -#include <linux/init_task.h> -#include <linux/fs.h> -#include <linux/mqueue.h> - -#include <asm/uaccess.h> -#include <asm/pgtable.h> -#include <asm/desc.h> - -static struct fs_struct init_fs = INIT_FS; -static struct files_struct init_files = INIT_FILES; -static struct signal_struct init_signals = INIT_SIGNALS(init_signals); -static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - -/* - * Initial task structure. - * - * We need to make sure that this is 8192-byte aligned due to the - * way process stacks are handled. This is done by having a special - * "init_task" linker map entry.. - */ -union thread_union init_thread_union - __attribute__((__section__(".data.init_task"))) = - { INIT_THREAD_INFO(init_task) }; - -/* - * Initial task structure. - * - * All other task structs will be allocated on slabs in fork.c - */ -struct task_struct init_task = INIT_TASK(init_task); - -EXPORT_SYMBOL(init_task); -/* - * per-CPU TSS segments. Threads are completely 'soft' on Linux, - * no more per-task TSS's. 
The TSS size is kept cacheline-aligned - * so they are allowed to end up in the .data.cacheline_aligned - * section. Since TSS's are completely CPU-local, we want them - * on exact cacheline boundaries, to eliminate cacheline ping-pong. - */ -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; - -/* Copies of the original ist values from the tss are only accessed during - * debugging, no special alignment required. - */ -DEFINE_PER_CPU(struct orig_ist, orig_ist); - -#define ALIGN_TO_4K __attribute__((section(".data.init_task"))) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 5f10c71..f35c6eb 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -584,7 +584,7 @@ tryanotherirq: imbalance = move_this_load; - /* For physical_balance case, we accumlated both load + /* For physical_balance case, we accumulated both load * values in the one of the siblings cpu_irq[], * to use the same code for physical and logical processors * as much as possible. @@ -1198,7 +1198,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 } static int __assign_irq_vector(int irq) { static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; - int vector, offset, i; + int vector, offset; BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); @@ -1215,11 +1215,8 @@ next: } if (vector == current_vector) return -ENOSPC; - if (vector == SYSCALL_VECTOR) + if (test_and_set_bit(vector, used_vectors)) goto next; - for (i = 0; i < NR_IRQ_VECTORS; i++) - if (irq_vector[i] == vector) - goto next; current_vector = vector; current_offset = offset; @@ -2295,6 +2292,12 @@ static inline void __init check_timer(void) void __init setup_IO_APIC(void) { + int i; + + /* Reserve all the system vectors. */ + for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++) + set_bit(i, used_vectors); + enable_IO_APIC(); if (acpi_ioapic) @@ -2472,7 +2475,7 @@ void destroy_irq(unsigned int irq) } /* - * MSI mesage composition + * MSI message composition */ #ifdef CONFIG_PCI_MSI static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 1c2c7bf..b3c2d26 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -1770,7 +1770,7 @@ __setup("no_timer_check", notimercheck); /* * - * IRQ's that are handled by the PIC in the MPS IOAPIC case. + * IRQs that are handled by the PIC in the MPS IOAPIC case. * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. * Linux doesn't really care, as it's not actually used * for any interrupt handling anyway. @@ -1921,7 +1921,7 @@ void destroy_irq(unsigned int irq) } /* - * MSI mesage composition + * MSI message composition */ #ifdef CONFIG_PCI_MSI static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 8459ca6..11b935f 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -149,28 +149,6 @@ NORET_TYPE void machine_kexec(struct kimage *image) image->start, cpu_has_pae); } -/* crashkernel=size@addr specifies the location to reserve for - * a crash kernel. By reserving this memory we guarantee - * that linux never sets it up as a DMA target. - * Useful for holding code to do something appropriate - * after a kernel panic. 
- */ -static int __init parse_crashkernel(char *arg) -{ - unsigned long size, base; - size = memparse(arg, &arg); - if (*arg == '@') { - base = memparse(arg+1, &arg); - /* FIXME: Do I want a sanity check - * to validate the memory range? - */ - crashk_res.start = base; - crashk_res.end = base + size - 1; - } - return 0; -} -early_param("crashkernel", parse_crashkernel); - void arch_crash_save_vmcoreinfo(void) { #ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 7450b69..0d8577f 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -231,33 +231,6 @@ NORET_TYPE void machine_kexec(struct kimage *image) image->start); } -/* crashkernel=size@addr specifies the location to reserve for - * a crash kernel. By reserving this memory we guarantee - * that linux never set's it up as a DMA target. - * Useful for holding code to do something appropriate - * after a kernel panic. - */ -static int __init setup_crashkernel(char *arg) -{ - unsigned long size, base; - char *p; - if (!arg) - return -EINVAL; - size = memparse(arg, &p); - if (arg == p) - return -EINVAL; - if (*p == '@') { - base = memparse(p+1, &p); - /* FIXME: Do I want a sanity check to validate the - * memory range? Yes you do, but it's too early for - * e820 -AK */ - crashk_res.start = base; - crashk_res.end = base + size - 1; - } - return 0; -} -early_param("crashkernel", setup_crashkernel); - void arch_crash_save_vmcoreinfo(void) { #ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c index 8ca8f86..07bbfe7 100644 --- a/arch/x86/kernel/mce_64.c +++ b/arch/x86/kernel/mce_64.c @@ -320,7 +320,7 @@ void do_machine_check(struct pt_regs * regs, long error_code) #ifdef CONFIG_X86_MCE_INTEL /*** * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog - * @cpu: The CPU on which the event occured. + * @cpu: The CPU on which the event occurred. * @status: Event status information * * This function should be called by the thermal interrupt after the @@ -688,7 +688,7 @@ static int __init mcheck_disable(char *str) return 1; } -/* mce=off disables machine check. Note you can reenable it later +/* mce=off disables machine check. Note you can re-enable it later using sysfs. mce=TOLERANCELEVEL (number, see above) mce=bootlog Log MCEs from before booting. Disabled by default on AMD. 
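The parse_crashkernel()/setup_crashkernel() handlers removed from machine_kexec_32.c and machine_kexec_64.c above both parse a crashkernel=size@addr argument with memparse(). A standalone sketch of that size@addr parsing, substituting strtoull() for memparse() and using a hypothetical name (my_parse_crashkernel), to show the shape of the removed logic rather than its in-kernel replacement:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the removed crashkernel=size@addr parsers.
 * memparse() in the kernel also accepts K/M/G suffixes; that part is
 * omitted here for brevity. */
static int my_parse_crashkernel(const char *arg,
				unsigned long long *size,
				unsigned long long *base)
{
	char *end;

	*size = strtoull(arg, &end, 0);
	if (end == arg)
		return -1;		/* no size found */
	if (*end == '@')
		*base = strtoull(end + 1, &end, 0);
	else
		*base = 0;
	return 0;
}

int main(void)
{
	unsigned long long size, base;

	if (my_parse_crashkernel("0x4000000@0x1000000", &size, &base) == 0)
		printf("reserve %llu bytes at 0x%llx\n", size, base);
	return 0;
}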
@@ -799,19 +799,33 @@ static __cpuinit int mce_create_device(unsigned int cpu) { int err; int i; - if (!mce_available(&cpu_data[cpu])) + + if (!mce_available(&cpu_data(cpu))) return -EIO; + memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); per_cpu(device_mce,cpu).id = cpu; per_cpu(device_mce,cpu).cls = &mce_sysclass; err = sysdev_register(&per_cpu(device_mce,cpu)); + if (err) + return err; + + for (i = 0; mce_attributes[i]; i++) { + err = sysdev_create_file(&per_cpu(device_mce,cpu), + mce_attributes[i]); + if (err) + goto error; + } - if (!err) { - for (i = 0; mce_attributes[i]; i++) - sysdev_create_file(&per_cpu(device_mce,cpu), - mce_attributes[i]); + return 0; +error: + while (i--) { + sysdev_remove_file(&per_cpu(device_mce,cpu), + mce_attributes[i]); } + sysdev_unregister(&per_cpu(device_mce,cpu)); + return err; } @@ -823,7 +837,6 @@ static void mce_remove_device(unsigned int cpu) sysdev_remove_file(&per_cpu(device_mce,cpu), mce_attributes[i]); sysdev_unregister(&per_cpu(device_mce,cpu)); - memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); } /* Get notified when a cpu comes on/off. Be hotplug friendly. */ @@ -831,18 +844,21 @@ static int mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; + int err = 0; switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - mce_create_device(cpu); + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + err = mce_create_device(cpu); break; + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: mce_remove_device(cpu); break; } - return NOTIFY_OK; + return err ? NOTIFY_BAD : NOTIFY_OK; } static struct notifier_block mce_cpu_notifier = { @@ -857,9 +873,13 @@ static __init int mce_init_device(void) if (!mce_available(&boot_cpu_data)) return -EIO; err = sysdev_class_register(&mce_sysclass); + if (err) + return err; for_each_online_cpu(i) { - mce_create_device(i); + err = mce_create_device(i); + if (err) + return err; } register_hotcpu_notifier(&mce_cpu_notifier); diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c index 0d2afd9..752fb16 100644 --- a/arch/x86/kernel/mce_amd_64.c +++ b/arch/x86/kernel/mce_amd_64.c @@ -472,11 +472,11 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) sprintf(name, "threshold_bank%i", bank); #ifdef CONFIG_SMP - if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */ + if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ i = first_cpu(per_cpu(cpu_core_map, cpu)); /* first core not up yet */ - if (cpu_data[i].cpu_core_id) + if (cpu_data(i).cpu_core_id) goto out; /* already linked */ diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index 09cf781..09c3152 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c @@ -132,7 +132,7 @@ static struct ucode_cpu_info { static void collect_cpu_info(int cpu_num) { - struct cpuinfo_x86 *c = cpu_data + cpu_num; + struct cpuinfo_x86 *c = &cpu_data(cpu_num); struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; unsigned int val[2]; @@ -522,7 +522,7 @@ static struct platform_device *microcode_pdev; static int cpu_request_microcode(int cpu) { char name[30]; - struct cpuinfo_x86 *c = cpu_data + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); const struct firmware *firmware; void *buf; unsigned long size; @@ -570,7 +570,7 @@ static int cpu_request_microcode(int cpu) static int apply_microcode_check_cpu(int cpu) { - struct cpuinfo_x86 *c 
= cpu_data + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; cpumask_t old; unsigned int val[2]; diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 13abb4e..7a05a7f 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -1001,7 +1001,7 @@ void __init mp_config_acpi_legacy_irqs (void) /* * Use the default configuration for the IRQs 0-15. Unless - * overriden by (MADT) interrupt source override entries. + * overridden by (MADT) interrupt source override entries. */ for (i = 0; i < 16; i++) { int idx; diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 8bf0ca0..ef4aab1 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -57,6 +57,8 @@ unsigned long mp_lapic_addr = 0; /* Processor that is doing the boot up */ unsigned int boot_cpu_id = -1U; +EXPORT_SYMBOL(boot_cpu_id); + /* Internal processor count */ unsigned int num_processors __cpuinitdata = 0; @@ -86,7 +88,7 @@ static int __init mpf_checksum(unsigned char *mp, int len) return sum & 0xFF; } -static void __cpuinit MP_processor_info (struct mpc_config_processor *m) +static void __cpuinit MP_processor_info(struct mpc_config_processor *m) { int cpu; cpumask_t tmp_map; @@ -123,7 +125,18 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) cpu = 0; } bios_cpu_apicid[cpu] = m->mpc_apicid; - x86_cpu_to_apicid[cpu] = m->mpc_apicid; + /* + * We get called early in the the start_kernel initialization + * process when the per_cpu data area is not yet setup, so we + * use a static array that is removed after the per_cpu data + * area is created. + */ + if (x86_cpu_to_apicid_ptr) { + u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr; + x86_cpu_to_apicid[cpu] = m->mpc_apicid; + } else { + per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; + } cpu_set(cpu, cpu_possible_map); cpu_set(cpu, cpu_present_map); diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index df85c9c..ee6eba4 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -112,7 +112,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf, static int msr_open(struct inode *inode, struct file *file) { unsigned int cpu = iminor(file->f_path.dentry->d_inode); - struct cpuinfo_x86 *c = &(cpu_data)[cpu]; + struct cpuinfo_x86 *c = &cpu_data(cpu); if (cpu >= NR_CPUS || !cpu_online(cpu)) return -ENXIO; /* No such CPU */ @@ -133,37 +133,42 @@ static const struct file_operations msr_fops = { .open = msr_open, }; -static int __cpuinit msr_device_create(int i) +static int __cpuinit msr_device_create(int cpu) { - int err = 0; struct device *dev; - dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), "msr%d",i); - if (IS_ERR(dev)) - err = PTR_ERR(dev); - return err; + dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), + "msr%d", cpu); + return IS_ERR(dev) ? 
PTR_ERR(dev) : 0; +} + +static void msr_device_destroy(int cpu) +{ + device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); } static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; + int err = 0; switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - msr_device_create(cpu); + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + err = msr_device_create(cpu); break; + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: - device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); + msr_device_destroy(cpu); break; } - return NOTIFY_OK; + return err ? NOTIFY_BAD : NOTIFY_OK; } -static struct notifier_block __cpuinitdata msr_class_cpu_notifier = -{ +static struct notifier_block __cpuinitdata msr_class_cpu_notifier = { .notifier_call = msr_class_cpu_callback, }; @@ -196,7 +201,7 @@ static int __init msr_init(void) out_class: i = 0; for_each_online_cpu(i) - device_destroy(msr_class, MKDEV(MSR_MAJOR, i)); + msr_device_destroy(i); class_destroy(msr_class); out_chrdev: unregister_chrdev(MSR_MAJOR, "cpu/msr"); @@ -208,7 +213,7 @@ static void __exit msr_exit(void) { int cpu = 0; for_each_online_cpu(cpu) - device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); + msr_device_destroy(cpu); class_destroy(msr_class); unregister_chrdev(MSR_MAJOR, "cpu/msr"); unregister_hotcpu_notifier(&msr_class_cpu_notifier); diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c index b2b42bd..afaf9f1 100644 --- a/arch/x86/kernel/pci-dma_64.c +++ b/arch/x86/kernel/pci-dma_64.c @@ -11,7 +11,7 @@ #include <asm/iommu.h> #include <asm/calgary.h> -int iommu_merge __read_mostly = 0; +int iommu_merge __read_mostly = 1; EXPORT_SYMBOL(iommu_merge); dma_addr_t bad_dma_address __read_mostly; diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 097aeaf..7b89958 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -295,34 +295,52 @@ static int __init idle_setup(char *str) } early_param("idle", idle_setup); -void show_regs(struct pt_regs * regs) +void __show_registers(struct pt_regs *regs, int all) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; unsigned long d0, d1, d2, d3, d6, d7; + unsigned long esp; + unsigned short ss, gs; + + if (user_mode_vm(regs)) { + esp = regs->esp; + ss = regs->xss & 0xffff; + savesegment(gs, gs); + } else { + esp = (unsigned long) (®s->esp); + savesegment(ss, ss); + savesegment(gs, gs); + } printk("\n"); - printk("Pid: %d, comm: %20s\n", current->pid, current->comm); - printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); + printk("Pid: %d, comm: %s %s (%s %.*s)\n", + task_pid_nr(current), current->comm, + print_tainted(), init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); + + printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", + 0xffff & regs->xcs, regs->eip, regs->eflags, + smp_processor_id()); print_symbol("EIP is at %s\n", regs->eip); - if (user_mode_vm(regs)) - printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); - printk(" EFLAGS: %08lx %s (%s %.*s)\n", - regs->eflags, print_tainted(), init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", - regs->eax,regs->ebx,regs->ecx,regs->edx); - printk("ESI: %08lx EDI: %08lx EBP: %08lx", - regs->esi, regs->edi, regs->ebp); - printk(" DS: %04x ES: %04x FS: %04x\n", - 0xffff & 
regs->xds,0xffff & regs->xes, 0xffff & regs->xfs); + regs->eax, regs->ebx, regs->ecx, regs->edx); + printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", + regs->esi, regs->edi, regs->ebp, esp); + printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", + regs->xds & 0xffff, regs->xes & 0xffff, + regs->xfs & 0xffff, gs, ss); + + if (!all) + return; cr0 = read_cr0(); cr2 = read_cr2(); cr3 = read_cr3(); cr4 = read_cr4_safe(); - printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); + printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", + cr0, cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); @@ -330,10 +348,16 @@ void show_regs(struct pt_regs * regs) get_debugreg(d3, 3); printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", d0, d1, d2, d3); + get_debugreg(d6, 6); get_debugreg(d7, 7); - printk("DR6: %08lx DR7: %08lx\n", d6, d7); + printk("DR6: %08lx DR7: %08lx\n", + d6, d7); +} +void show_regs(struct pt_regs *regs) +{ + __show_registers(regs, 1); show_trace(NULL, regs, ®s->esp); } diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c index 99102ec..ff5431c 100644 --- a/arch/x86/kernel/ptrace_32.c +++ b/arch/x86/kernel/ptrace_32.c @@ -632,7 +632,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) /* User-mode eip? */ info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL; - /* Send us the fakey SIGTRAP */ + /* Send us the fake SIGTRAP */ force_sig_info(SIGTRAP, &info, tsk); } diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index d769e20..a4ce191 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -45,9 +45,12 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) if (!(config & 0x2)) pci_write_config_byte(dev, 0xf4, config); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, + quirk_intel_irqbalance); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, + quirk_intel_irqbalance); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, + quirk_intel_irqbalance); #endif #if defined(CONFIG_HPET_TIMER) @@ -56,7 +59,8 @@ unsigned long force_hpet_address; static enum { NONE_FORCE_HPET_RESUME, OLD_ICH_FORCE_HPET_RESUME, - ICH_FORCE_HPET_RESUME + ICH_FORCE_HPET_RESUME, + VT8237_FORCE_HPET_RESUME } force_hpet_resume_type; static void __iomem *rcba_base; @@ -146,17 +150,17 @@ static void ich_force_enable_hpet(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, - ich_force_enable_hpet); + ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, - ich_force_enable_hpet); + ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, - ich_force_enable_hpet); + ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, - ich_force_enable_hpet); + ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, - ich_force_enable_hpet); + ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, - ich_force_enable_hpet); + ich_force_enable_hpet); static 
struct pci_dev *cached_dev; @@ -232,10 +236,91 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev) printk(KERN_DEBUG "Failed to force enable HPET\n"); } +/* + * Undocumented chipset features. Make sure that the user enforced + * this. + */ +static void old_ich_force_enable_hpet_user(struct pci_dev *dev) +{ + if (hpet_force_user) + old_ich_force_enable_hpet(dev); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, + old_ich_force_enable_hpet_user); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, + old_ich_force_enable_hpet_user); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, + old_ich_force_enable_hpet_user); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, + old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, - old_ich_force_enable_hpet); + old_ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, - old_ich_force_enable_hpet); + old_ich_force_enable_hpet); + + +static void vt8237_force_hpet_resume(void) +{ + u32 val; + + if (!force_hpet_address || !cached_dev) + return; + + val = 0xfed00000 | 0x80; + pci_write_config_dword(cached_dev, 0x68, val); + + pci_read_config_dword(cached_dev, 0x68, &val); + if (val & 0x80) + printk(KERN_DEBUG "Force enabled HPET at resume\n"); + else + BUG(); +} + +static void vt8237_force_enable_hpet(struct pci_dev *dev) +{ + u32 uninitialized_var(val); + + if (!hpet_force_user || hpet_address || force_hpet_address) + return; + + pci_read_config_dword(dev, 0x68, &val); + /* + * Bit 7 is HPET enable bit. + * Bit 31:10 is HPET base address (contrary to what datasheet claims) + */ + if (val & 0x80) { + force_hpet_address = (val & ~0x3ff); + printk(KERN_DEBUG "HPET at base address 0x%lx\n", + force_hpet_address); + return; + } + + /* + * HPET is disabled. 
Trying enabling at FED00000 and check + * whether it sticks + */ + val = 0xfed00000 | 0x80; + pci_write_config_dword(dev, 0x68, val); + + pci_read_config_dword(dev, 0x68, &val); + if (val & 0x80) { + force_hpet_address = (val & ~0x3ff); + printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", + force_hpet_address); + cached_dev = dev; + force_hpet_resume_type = VT8237_FORCE_HPET_RESUME; + return; + } + + printk(KERN_DEBUG "Failed to force enable HPET\n"); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, + vt8237_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, + vt8237_force_enable_hpet); + void force_hpet_resume(void) { @@ -246,6 +331,9 @@ void force_hpet_resume(void) case OLD_ICH_FORCE_HPET_RESUME: return old_ich_force_hpet_resume(); + case VT8237_FORCE_HPET_RESUME: + return vt8237_force_hpet_resume(); + default: break; } diff --git a/arch/x86/kernel/reboot_64.c b/arch/x86/kernel/reboot_64.c index 368db2b..776eb06 100644 --- a/arch/x86/kernel/reboot_64.c +++ b/arch/x86/kernel/reboot_64.c @@ -11,6 +11,7 @@ #include <linux/sched.h> #include <asm/io.h> #include <asm/delay.h> +#include <asm/desc.h> #include <asm/hw_irq.h> #include <asm/system.h> #include <asm/pgtable.h> @@ -136,7 +137,7 @@ void machine_emergency_restart(void) } case BOOT_TRIPLE: - __asm__ __volatile__("lidt (%0)": :"r" (&no_idt)); + load_idt((const struct desc_ptr *)&no_idt); __asm__ __volatile__("int3"); reboot_type = BOOT_KBD; diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c index 8b30b26..1a07bbe 100644 --- a/arch/x86/kernel/reboot_fixups_32.c +++ b/arch/x86/kernel/reboot_fixups_32.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <asm/reboot_fixups.h> #include <asm/msr.h> +#include <asm/geode.h> static void cs5530a_warm_reset(struct pci_dev *dev) { @@ -24,11 +25,8 @@ static void cs5530a_warm_reset(struct pci_dev *dev) static void cs5536_warm_reset(struct pci_dev *dev) { - /* - * 6.6.2.12 Soft Reset (DIVIL_SOFT_RESET) - * writing 1 to the LSB of this MSR causes a hard reset. - */ - wrmsrl(0x51400017, 1ULL); + /* writing 1 to the LSB of this MSR causes a hard reset */ + wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL); udelay(50); /* shouldn't get here but be safe and spin a while */ } diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c index ba91882..3558ac7 100644 --- a/arch/x86/kernel/setup64.c +++ b/arch/x86/kernel/setup64.c @@ -185,6 +185,12 @@ void __cpuinit check_efer(void) unsigned long kernel_eflags; /* + * Copies of the original ist values from the tss are only accessed during + * debugging, no special alignment required. + */ +DEFINE_PER_CPU(struct orig_ist, orig_ist); + +/* * cpu_init() initializes state that is per-CPU. Some data is already * initialized (naturally) in the bootstrap process, such as the GDT * and IDT. 
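
A small illustrative helper, assuming the register layout the VT8237 quirk above relies on (bit 7 enables the HPET, bits 31:10 carry its base address); this is an editor's sketch, not part of the patch:

/* Decode the VT8237 HPET configuration dword read from offset 0x68. */
static unsigned long vt8237_hpet_base(u32 val)
{
	if (!(val & 0x80))	/* HPET not enabled */
		return 0;
	return val & ~0x3ffUL;	/* strip the low control bits, keep bits 31:10 */
}
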
We reload them nevertheless, this function acts as a @@ -224,8 +230,8 @@ void __cpuinit cpu_init (void) memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE); cpu_gdt_descr[cpu].size = GDT_SIZE; - asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu])); - asm volatile("lidt %0" :: "m" (idt_descr)); + load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]); + load_idt((const struct desc_ptr *)&idt_descr); memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); syscall_init(); diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index b87a6fd..ba2e165 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c @@ -378,6 +378,49 @@ extern unsigned long __init setup_memory(void); extern void zone_sizes_init(void); #endif /* !CONFIG_NEED_MULTIPLE_NODES */ +static inline unsigned long long get_total_mem(void) +{ + unsigned long long total; + + total = max_low_pfn - min_low_pfn; +#ifdef CONFIG_HIGHMEM + total += highend_pfn - highstart_pfn; +#endif + + return total << PAGE_SHIFT; +} + +#ifdef CONFIG_KEXEC +static void __init reserve_crashkernel(void) +{ + unsigned long long total_mem; + unsigned long long crash_size, crash_base; + int ret; + + total_mem = get_total_mem(); + + ret = parse_crashkernel(boot_command_line, total_mem, + &crash_size, &crash_base); + if (ret == 0 && crash_size > 0) { + if (crash_base > 0) { + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(total_mem >> 20)); + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + reserve_bootmem(crash_base, crash_size); + } else + printk(KERN_INFO "crashkernel reservation failed - " + "you have to specify a base address\n"); + } +} +#else +static inline void __init reserve_crashkernel(void) +{} +#endif + void __init setup_bootmem_allocator(void) { unsigned long bootmap_size; @@ -453,11 +496,7 @@ void __init setup_bootmem_allocator(void) } } #endif -#ifdef CONFIG_KEXEC - if (crashk_res.start != crashk_res.end) - reserve_bootmem(crashk_res.start, - crashk_res.end - crashk_res.start + 1); -#endif + reserve_crashkernel(); } /* @@ -585,7 +624,7 @@ void __init setup_arch(char **cmdline_p) /* * NOTE: before this point _nobody_ is allowed to allocate * any memory using the bootmem allocator. Although the - * alloctor is now initialised only the first 8Mb of the kernel + * allocator is now initialised only the first 8Mb of the kernel * virtual address space has been mapped. 
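
The per-architecture crashkernel= parsers removed earlier in this series are superseded by the generic parse_crashkernel() helper used by reserve_crashkernel() above; a minimal sketch of the intended calling pattern (names illustrative, error handling trimmed):

/* Sketch of the common pattern: let the generic parser interpret
 * "crashkernel=size@base" and reserve the range via bootmem. */
static void __init sketch_reserve_crashkernel(unsigned long long system_ram)
{
	unsigned long long crash_size, crash_base;

	if (parse_crashkernel(boot_command_line, system_ram,
			      &crash_size, &crash_base) || !crash_size)
		return;		/* option absent, malformed, or zero-sized */

	if (crash_base) {
		crashk_res.start = crash_base;
		crashk_res.end   = crash_base + crash_size - 1;
		reserve_bootmem(crash_base, crash_size);
	}
}
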
All allocations before * paging_init() has completed must use the alloc_bootmem_low_pages() * variant (which allocates DMA'able memory) and care must be taken @@ -622,9 +661,7 @@ void __init setup_arch(char **cmdline_p) #endif #ifdef CONFIG_PCI -#ifdef CONFIG_X86_IO_APIC - check_acpi_pci(); /* Checks more than just ACPI actually */ -#endif + early_quirks(); #endif #ifdef CONFIG_ACPI diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 5a19f0c..31322d4 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -191,6 +191,37 @@ static inline void copy_edd(void) } #endif +#ifdef CONFIG_KEXEC +static void __init reserve_crashkernel(void) +{ + unsigned long long free_mem; + unsigned long long crash_size, crash_base; + int ret; + + free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT; + + ret = parse_crashkernel(boot_command_line, free_mem, + &crash_size, &crash_base); + if (ret == 0 && crash_size) { + if (crash_base > 0) { + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(free_mem >> 20)); + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + reserve_bootmem(crash_base, crash_size); + } else + printk(KERN_INFO "crashkernel reservation failed - " + "you have to specify a base address\n"); + } +} +#else +static inline void __init reserve_crashkernel(void) +{} +#endif + #define EBDA_ADDR_POINTER 0x40E unsigned __initdata ebda_addr; @@ -271,6 +302,11 @@ void __init setup_arch(char **cmdline_p) dmi_scan_machine(); +#ifdef CONFIG_SMP + /* setup to use the static apicid table during kernel startup */ + x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init; +#endif + #ifdef CONFIG_ACPI /* * Initialize the ACPI boot-time table parser (gets the RSDP and SDT). @@ -357,13 +393,7 @@ void __init setup_arch(char **cmdline_p) } } #endif -#ifdef CONFIG_KEXEC - if (crashk_res.start != crashk_res.end) { - reserve_bootmem_generic(crashk_res.start, - crashk_res.end - crashk_res.start + 1); - } -#endif - + reserve_crashkernel(); paging_init(); #ifdef CONFIG_PCI @@ -529,7 +559,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) but in the same order as the HT nodeids. If that doesn't result in a usable node fall back to the path for the previous case. */ - int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits); + int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits); if (ht_nodeid >= 0 && apicid_to_node[ht_nodeid] != NUMA_NO_NODE) node = apicid_to_node[ht_nodeid]; @@ -853,6 +883,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) #ifdef CONFIG_SMP c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; + c->cpu_index = 0; #endif } @@ -959,6 +990,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; + int cpu = 0; /* * These flag bits must match the definitions in <asm/cpufeature.h>. @@ -1037,8 +1069,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) #ifdef CONFIG_SMP - if (!cpu_online(c-cpu_data)) + if (!cpu_online(c->cpu_index)) return 0; + cpu = c->cpu_index; #endif seq_printf(m,"processor\t: %u\n" @@ -1046,7 +1079,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) "cpu family\t: %d\n" "model\t\t: %d\n" "model name\t: %s\n", - (unsigned)(c-cpu_data), + (unsigned)cpu, c->x86_vendor_id[0] ? 
c->x86_vendor_id : "unknown", c->x86, (int)c->x86_model, @@ -1058,7 +1091,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "stepping\t: unknown\n"); if (cpu_has(c,X86_FEATURE_TSC)) { - unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data)); + unsigned int freq = cpufreq_quick_get((unsigned)cpu); if (!freq) freq = cpu_khz; seq_printf(m, "cpu MHz\t\t: %u.%03u\n", @@ -1071,7 +1104,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) #ifdef CONFIG_SMP if (smp_num_siblings * c->x86_max_cores > 1) { - int cpu = c - cpu_data; seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); seq_printf(m, "siblings\t: %d\n", cpus_weight(per_cpu(cpu_core_map, cpu))); @@ -1129,12 +1161,16 @@ static int show_cpuinfo(struct seq_file *m, void *v) static void *c_start(struct seq_file *m, loff_t *pos) { - return *pos < NR_CPUS ? cpu_data + *pos : NULL; + if (*pos == 0) /* just in case, cpu 0 is not the first */ + *pos = first_cpu(cpu_possible_map); + if ((*pos) < NR_CPUS && cpu_possible(*pos)) + return &cpu_data(*pos); + return NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { - ++*pos; + *pos = next_cpu(*pos, cpu_possible_map); return c_start(m, pos); } diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 0d79df3..9bdd830 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -200,8 +200,8 @@ badframe: if (show_unhandled_signals && printk_ratelimit()) printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx" " esp:%lx oeax:%lx\n", - current->pid > 1 ? KERN_INFO : KERN_EMERG, - current->comm, current->pid, frame, regs->eip, + task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, + current->comm, task_pid_nr(current), frame, regs->eip, regs->esp, regs->orig_eax); force_sig(SIGSEGV, current); @@ -594,7 +594,7 @@ static void fastcall do_signal(struct pt_regs *regs) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - /* Reenable any watchpoints before delivering the + /* Re-enable any watchpoints before delivering the * signal to user space. The processor register will * have been cleared if the watchpoint triggered * inside the kernel. diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 683802b..ab086b0 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -410,7 +410,7 @@ static void do_signal(struct pt_regs *regs) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - /* Reenable any watchpoints before delivering the + /* Re-enable any watchpoints before delivering the * signal to user space. The processor register will * have been cleared if the watchpoint triggered * inside the kernel. diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index 791d9f8..f321153 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c @@ -69,7 +69,7 @@ * * B stepping CPUs may hang. There are hardware work arounds * for this. We warn about it in case your board doesn't have the work - * arounds. Basically thats so I can tell anyone with a B stepping + * arounds. Basically that's so I can tell anyone with a B stepping * CPU and SMP problems "tough". * * Specific items [From Pentium Processor Specification Update] @@ -273,7 +273,7 @@ void leave_mm(unsigned long cpu) * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. 
This is not synchronized with * the other cpus, but smp_invalidate_interrupt ignore flush ipis - * for the wrong mm, and in the worst case we perform a superflous + * for the wrong mm, and in the worst case we perform a superfluous * tlb flush. * 1a2) set cpu_tlbstate to TLBSTATE_OK * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 @@ -610,7 +610,7 @@ static void stop_this_cpu (void * dummy) */ cpu_clear(smp_processor_id(), cpu_online_map); disable_local_APIC(); - if (cpu_data[smp_processor_id()].hlt_works_ok) + if (cpu_data(smp_processor_id()).hlt_works_ok) for(;;) halt(); for (;;); } @@ -676,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id) int i; for (i = 0; i < NR_CPUS; i++) { - if (x86_cpu_to_apicid[i] == apic_id) + if (per_cpu(x86_cpu_to_apicid, i) == apic_id) return i; } return -1; diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 5c29647..03fa6ed 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void) } /* - * this function sends a 'generic call function' IPI to one other CPU - * in the system. - * - * cpu is a standard Linux logical CPU number. + * this function sends a 'generic call function' IPI to all other CPU + * of the system defined in the mask. */ -static void -__smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait) + +static int +__smp_call_function_mask(cpumask_t mask, + void (*func)(void *), void *info, + int wait) { struct call_data_struct data; - int cpus = 1; + cpumask_t allbutself; + int cpus; + + allbutself = cpu_online_map; + cpu_clear(smp_processor_id(), allbutself); + + cpus_and(mask, mask, allbutself); + cpus = cpus_weight(mask); + + if (!cpus) + return 0; data.func = func; data.info = info; @@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info, call_data = &data; wmb(); - /* Send a message to all other CPUs and wait for them to respond */ - send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); + + /* Send a message to other CPUs */ + if (cpus_equal(mask, allbutself)) + send_IPI_allbutself(CALL_FUNCTION_VECTOR); + else + send_IPI_mask(mask, CALL_FUNCTION_VECTOR); /* Wait for response */ while (atomic_read(&data.started) != cpus) cpu_relax(); if (!wait) - return; + return 0; while (atomic_read(&data.finished) != cpus) cpu_relax(); + + return 0; +} +/** + * smp_call_function_mask(): Run a function on a set of other CPUs. + * @mask: The set of cpus to run on. Must not include the current cpu. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @wait: If true, wait (atomically) until function has completed on other CPUs. + * + * Returns 0 on success, else a negative status code. + * + * If @wait is true, then returns once @func has returned; otherwise + * it returns just before the target cpu calls @func. + * + * You must not call this function with disabled interrupts or from a + * hardware interrupt handler or from a bottom half handler. 
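
A hypothetical caller of the interface documented above, showing the calling convention (the callback and mask are illustrative, not part of the patch):

/* Runs on every online CPU in 'mask' other than the caller. */
static void sketch_poke_cpu(void *info)
{
	/* keep this fast and non-blocking; it runs in interrupt context */
}

static int sketch_poke_cpus(cpumask_t mask)
{
	/* interrupts must be enabled; do not call from IRQ context */
	return smp_call_function_mask(mask, sketch_poke_cpu, NULL, 1);
}
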
+ */ +int smp_call_function_mask(cpumask_t mask, + void (*func)(void *), void *info, + int wait) +{ + int ret; + + /* Can deadlock when called with interrupts disabled */ + WARN_ON(irqs_disabled()); + + spin_lock(&call_lock); + ret = __smp_call_function_mask(mask, func, info, wait); + spin_unlock(&call_lock); + return ret; } +EXPORT_SYMBOL(smp_call_function_mask); /* * smp_call_function_single - Run a function on a specific CPU @@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, int nonatomic, int wait) { /* prevent preemption and reschedule on another processor */ + int ret; int me = get_cpu(); /* Can deadlock when called with interrupts disabled */ @@ -387,51 +434,14 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, return 0; } - spin_lock(&call_lock); - __smp_call_function_single(cpu, func, info, nonatomic, wait); - spin_unlock(&call_lock); + ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); + put_cpu(); - return 0; + return ret; } EXPORT_SYMBOL(smp_call_function_single); /* - * this function sends a 'generic call function' IPI to all other CPUs - * in the system. - */ -static void __smp_call_function (void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - struct call_data_struct data; - int cpus = num_online_cpus()-1; - - if (!cpus) - return; - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - wmb(); - /* Send a message to all other CPUs and wait for them to respond */ - send_IPI_allbutself(CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (!wait) - return; - - while (atomic_read(&data.finished) != cpus) - cpu_relax(); -} - -/* * smp_call_function - run a function on all other CPUs. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. @@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info, int smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) { - spin_lock(&call_lock); - __smp_call_function(func,info,nonatomic,wait); - spin_unlock(&call_lock); - return 0; + return smp_call_function_mask(cpu_online_map, func, info, wait); } EXPORT_SYMBOL(smp_call_function); @@ -479,7 +486,7 @@ void smp_send_stop(void) /* Don't deadlock on the call lock in panic */ nolock = !spin_trylock(&call_lock); local_irq_save(flags); - __smp_call_function(stop_this_cpu, NULL, 0, 0); + __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0); if (!nolock) spin_unlock(&call_lock); disable_local_APIC(); diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index be3faac..ef0f34e 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -67,7 +67,7 @@ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); /* Last level cache ID of each logical CPU */ -int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... 
NR_CPUS-1] = BAD_APICID}; +DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID; /* representing HT siblings of each logical CPU */ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); @@ -89,12 +89,20 @@ EXPORT_SYMBOL(cpu_possible_map); static cpumask_t smp_commenced_mask; /* Per CPU bogomips and other parameters */ -struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; -EXPORT_SYMBOL(cpu_data); +DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); +EXPORT_PER_CPU_SYMBOL(cpu_info); -u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = - { [0 ... NR_CPUS-1] = 0xff }; -EXPORT_SYMBOL(x86_cpu_to_apicid); +/* + * The following static array is used during kernel startup + * and the x86_cpu_to_apicid_ptr contains the address of the + * array during this time. Is it zeroed when the per_cpu + * data area is removed. + */ +u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata = + { [0 ... NR_CPUS-1] = BAD_APICID }; +void *x86_cpu_to_apicid_ptr; +DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID; +EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); u8 apicid_2_node[MAX_APICID]; @@ -150,9 +158,10 @@ void __init smp_alloc_memory(void) void __cpuinit smp_store_cpu_info(int id) { - struct cpuinfo_x86 *c = cpu_data + id; + struct cpuinfo_x86 *c = &cpu_data(id); *c = boot_cpu_data; + c->cpu_index = id; if (id!=0) identify_secondary_cpu(c); /* @@ -294,7 +303,7 @@ static int cpucount; /* maps the cpu to the sched domain representing multi-core */ cpumask_t cpu_coregroup_map(int cpu) { - struct cpuinfo_x86 *c = cpu_data + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); /* * For perf, we return last level cache shared map. * And for power savings, we return cpu_core_map @@ -311,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map; void __cpuinit set_cpu_sibling_map(int cpu) { int i; - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(cpu); cpu_set(cpu, cpu_sibling_setup_map); if (smp_num_siblings > 1) { for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (c[cpu].phys_proc_id == c[i].phys_proc_id && - c[cpu].cpu_core_id == c[i].cpu_core_id) { + if (c->phys_proc_id == cpu_data(i).phys_proc_id && + c->cpu_core_id == cpu_data(i).cpu_core_id) { cpu_set(i, per_cpu(cpu_sibling_map, cpu)); cpu_set(cpu, per_cpu(cpu_sibling_map, i)); cpu_set(i, per_cpu(cpu_core_map, cpu)); cpu_set(cpu, per_cpu(cpu_core_map, i)); - cpu_set(i, c[cpu].llc_shared_map); - cpu_set(cpu, c[i].llc_shared_map); + cpu_set(i, c->llc_shared_map); + cpu_set(cpu, cpu_data(i).llc_shared_map); } } } else { cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); } - cpu_set(cpu, c[cpu].llc_shared_map); + cpu_set(cpu, c->llc_shared_map); if (current_cpu_data.x86_max_cores == 1) { per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); - c[cpu].booted_cores = 1; + c->booted_cores = 1; return; } for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (cpu_llc_id[cpu] != BAD_APICID && - cpu_llc_id[cpu] == cpu_llc_id[i]) { - cpu_set(i, c[cpu].llc_shared_map); - cpu_set(cpu, c[i].llc_shared_map); + if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && + per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { + cpu_set(i, c->llc_shared_map); + cpu_set(cpu, cpu_data(i).llc_shared_map); } - if (c[cpu].phys_proc_id == c[i].phys_proc_id) { + if (c->phys_proc_id == cpu_data(i).phys_proc_id) { cpu_set(i, per_cpu(cpu_core_map, cpu)); cpu_set(cpu, per_cpu(cpu_core_map, i)); /* @@ -357,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) * the booted_cores for this new cpu */ if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) - c[cpu].booted_cores++; + c->booted_cores++; /* * increment the core 
count for all * the other cpus in this package */ if (i != cpu) - c[i].booted_cores++; - } else if (i != cpu && !c[cpu].booted_cores) - c[cpu].booted_cores = c[i].booted_cores; + cpu_data(i).booted_cores++; + } else if (i != cpu && !c->booted_cores) + c->booted_cores = cpu_data(i).booted_cores; } } } @@ -412,7 +421,7 @@ static void __cpuinit start_secondary(void *unused) /* * We need to hold call_lock, so there is no inconsistency * between the time smp_call_function() determines number of - * IPI receipients, and the time when the determination is made + * IPI recipients, and the time when the determination is made * for which cpus receive the IPI. Holding this * lock helps us to not include this cpu in a currently in progress * smp_call_function(). @@ -804,7 +813,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) irq_ctx_init(cpu); - x86_cpu_to_apicid[cpu] = apicid; + per_cpu(x86_cpu_to_apicid, cpu) = apicid; /* * This grunge runs the startup process for * the targeted processor. @@ -844,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) /* number CPUs logically, starting from 1 (BSP is 0) */ Dprintk("OK.\n"); printk("CPU%d: ", cpu); - print_cpu_info(&cpu_data[cpu]); + print_cpu_info(&cpu_data(cpu)); Dprintk("CPU has booted.\n"); } else { boot_error= 1; @@ -866,7 +875,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ cpucount--; } else { - x86_cpu_to_apicid[cpu] = apicid; + per_cpu(x86_cpu_to_apicid, cpu) = apicid; cpu_set(cpu, cpu_present_map); } @@ -915,7 +924,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) struct warm_boot_cpu_info info; int apicid, ret; - apicid = x86_cpu_to_apicid[cpu]; + apicid = per_cpu(x86_cpu_to_apicid, cpu); if (apicid == BAD_APICID) { ret = -ENODEV; goto exit; @@ -961,11 +970,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus) */ smp_store_cpu_info(0); /* Final full version of the data */ printk("CPU%d: ", 0); - print_cpu_info(&cpu_data[0]); + print_cpu_info(&cpu_data(0)); boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); boot_cpu_logical_apicid = logical_smp_processor_id(); - x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; + per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid; current_thread_info()->cpu = 0; @@ -1008,6 +1017,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) printk(KERN_ERR "... forcing use of dummy APIC emulation. 
(tell your hw vendor)\n"); smpboot_clear_io_apic_irqs(); phys_cpu_present_map = physid_mask_of_physid(0); + map_cpu_to_logical_apicid(); cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, per_cpu(cpu_core_map, 0)); return; @@ -1029,6 +1039,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) } smpboot_clear_io_apic_irqs(); phys_cpu_present_map = physid_mask_of_physid(0); + map_cpu_to_logical_apicid(); cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, per_cpu(cpu_core_map, 0)); return; @@ -1082,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) Dprintk("Before bogomips.\n"); for (cpu = 0; cpu < NR_CPUS; cpu++) if (cpu_isset(cpu, cpu_callout_map)) - bogosum += cpu_data[cpu].loops_per_jiffy; + bogosum += cpu_data(cpu).loops_per_jiffy; printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", cpucount+1, @@ -1152,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void) void remove_siblinginfo(int cpu) { int sibling; - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(cpu); for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); @@ -1160,15 +1171,15 @@ void remove_siblinginfo(int cpu) * last thread sibling in this cpu core going down */ if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) - c[sibling].booted_cores--; + cpu_data(sibling).booted_cores--; } for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); cpus_clear(per_cpu(cpu_sibling_map, cpu)); cpus_clear(per_cpu(cpu_core_map, cpu)); - c[cpu].phys_proc_id = 0; - c[cpu].cpu_core_id = 0; + c->phys_proc_id = 0; + c->cpu_core_id = 0; cpu_clear(cpu, cpu_sibling_setup_map); } diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index e351ac4..b7e768d 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -65,7 +65,7 @@ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); /* Last level cache ID of each logical CPU */ -u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; +DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID; /* Bitmask of currently online CPUs */ cpumask_t cpu_online_map __read_mostly; @@ -84,8 +84,8 @@ cpumask_t cpu_possible_map; EXPORT_SYMBOL(cpu_possible_map); /* Per CPU bogomips and other parameters */ -struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; -EXPORT_SYMBOL(cpu_data); +DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); +EXPORT_PER_CPU_SYMBOL(cpu_info); /* Set when the idlers are all forked */ int smp_threads_ready; @@ -138,9 +138,10 @@ static unsigned long __cpuinit setup_trampoline(void) static void __cpuinit smp_store_cpu_info(int id) { - struct cpuinfo_x86 *c = cpu_data + id; + struct cpuinfo_x86 *c = &cpu_data(id); *c = boot_cpu_data; + c->cpu_index = id; identify_cpu(c); print_cpu_info(c); } @@ -237,7 +238,7 @@ void __cpuinit smp_callin(void) /* maps the cpu to the sched domain representing multi-core */ cpumask_t cpu_coregroup_map(int cpu) { - struct cpuinfo_x86 *c = cpu_data + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); /* * For perf, we return last level cache shared map. 
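
Throughout these hunks the NR_CPUS-sized cpu_data[] array gives way to a per-CPU cpu_info instance reached through cpu_data(cpu). The accessor itself lives in the asm headers, outside this diff; it is assumed to be roughly:

/* Assumed shape of the accessor used above (asm headers, not this patch): */
#define cpu_data(cpu)	per_cpu(cpu_info, cpu)

/* Typical conversion of a reader: */
static unsigned long sketch_loops_per_jiffy(int cpu)
{
	/* old: cpu_data[cpu].loops_per_jiffy; new: the per-CPU copy */
	return cpu_data(cpu).loops_per_jiffy;
}
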
* And for power savings, we return cpu_core_map @@ -254,41 +255,41 @@ static cpumask_t cpu_sibling_setup_map; static inline void set_cpu_sibling_map(int cpu) { int i; - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(cpu); cpu_set(cpu, cpu_sibling_setup_map); if (smp_num_siblings > 1) { for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (c[cpu].phys_proc_id == c[i].phys_proc_id && - c[cpu].cpu_core_id == c[i].cpu_core_id) { + if (c->phys_proc_id == cpu_data(i).phys_proc_id && + c->cpu_core_id == cpu_data(i).cpu_core_id) { cpu_set(i, per_cpu(cpu_sibling_map, cpu)); cpu_set(cpu, per_cpu(cpu_sibling_map, i)); cpu_set(i, per_cpu(cpu_core_map, cpu)); cpu_set(cpu, per_cpu(cpu_core_map, i)); - cpu_set(i, c[cpu].llc_shared_map); - cpu_set(cpu, c[i].llc_shared_map); + cpu_set(i, c->llc_shared_map); + cpu_set(cpu, cpu_data(i).llc_shared_map); } } } else { cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); } - cpu_set(cpu, c[cpu].llc_shared_map); + cpu_set(cpu, c->llc_shared_map); if (current_cpu_data.x86_max_cores == 1) { per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); - c[cpu].booted_cores = 1; + c->booted_cores = 1; return; } for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (cpu_llc_id[cpu] != BAD_APICID && - cpu_llc_id[cpu] == cpu_llc_id[i]) { - cpu_set(i, c[cpu].llc_shared_map); - cpu_set(cpu, c[i].llc_shared_map); + if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && + per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { + cpu_set(i, c->llc_shared_map); + cpu_set(cpu, cpu_data(i).llc_shared_map); } - if (c[cpu].phys_proc_id == c[i].phys_proc_id) { + if (c->phys_proc_id == cpu_data(i).phys_proc_id) { cpu_set(i, per_cpu(cpu_core_map, cpu)); cpu_set(cpu, per_cpu(cpu_core_map, i)); /* @@ -300,15 +301,15 @@ static inline void set_cpu_sibling_map(int cpu) * the booted_cores for this new cpu */ if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) - c[cpu].booted_cores++; + c->booted_cores++; /* * increment the core count for all * the other cpus in this package */ if (i != cpu) - c[i].booted_cores++; - } else if (i != cpu && !c[cpu].booted_cores) - c[cpu].booted_cores = c[i].booted_cores; + cpu_data(i).booted_cores++; + } else if (i != cpu && !c->booted_cores) + c->booted_cores = cpu_data(i).booted_cores; } } } @@ -350,7 +351,7 @@ void __cpuinit start_secondary(void) /* * We need to hold call_lock, so there is no inconsistency * between the time smp_call_function() determines number of - * IPI receipients, and the time when the determination is made + * IPI recipients, and the time when the determination is made * for which cpus receive the IPI in genapic_flat.c. Holding this * lock helps us to not include this cpu in a currently in progress * smp_call_function(). @@ -694,7 +695,7 @@ do_rest: clear_node_cpumask(cpu); /* was set by numa_add_cpu */ cpu_clear(cpu, cpu_present_map); cpu_clear(cpu, cpu_possible_map); - x86_cpu_to_apicid[cpu] = BAD_APICID; + per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; return -EIO; } @@ -841,6 +842,26 @@ static int __init smp_sanity_check(unsigned max_cpus) } /* + * Copy apicid's found by MP_processor_info from initial array to the per cpu + * data area. The x86_cpu_to_apicid_init array is then expendable and the + * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no + * longer available. 
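
The handshake described here spans several hunks (MP_processor_info above, smp_set_apicids below); condensed into one illustrative, non-patch sketch:

/* Condensed sketch of the early-boot apicid bookkeeping: before the
 * per-CPU area exists, writes land in the static __initdata array
 * published via x86_cpu_to_apicid_ptr; once smp_set_apicids() has copied
 * it into the per-CPU variable, the pointer is cleared and later writers
 * use per_cpu() directly. */
static void __cpuinit sketch_store_apicid(int cpu, u8 apicid)
{
	if (x86_cpu_to_apicid_ptr) {
		u8 *early = (u8 *)x86_cpu_to_apicid_ptr;
		early[cpu] = apicid;
	} else {
		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
	}
}
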
+ */ +void __init smp_set_apicids(void) +{ + int cpu; + + for_each_cpu_mask(cpu, cpu_possible_map) { + if (per_cpu_offset(cpu)) + per_cpu(x86_cpu_to_apicid, cpu) = + x86_cpu_to_apicid_init[cpu]; + } + + /* indicate the static array will be going away soon */ + x86_cpu_to_apicid_ptr = NULL; +} + +/* * Prepare for SMP bootup. The MP table or ACPI has been read * earlier. Just do some sanity checking here and enable APIC mode. */ @@ -849,6 +870,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) nmi_watchdog_default(); current_cpu_data = boot_cpu_data; current_thread_info()->cpu = 0; /* needed? */ + smp_set_apicids(); set_cpu_sibling_map(0); if (smp_sanity_check(max_cpus) < 0) { @@ -968,7 +990,7 @@ void __init smp_cpus_done(unsigned int max_cpus) static void remove_siblinginfo(int cpu) { int sibling; - struct cpuinfo_x86 *c = cpu_data; + struct cpuinfo_x86 *c = &cpu_data(cpu); for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); @@ -976,15 +998,15 @@ static void remove_siblinginfo(int cpu) * last thread sibling in this cpu core going down */ if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) - c[sibling].booted_cores--; + cpu_data(sibling).booted_cores--; } for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); cpus_clear(per_cpu(cpu_sibling_map, cpu)); cpus_clear(per_cpu(cpu_core_map, cpu)); - c[cpu].phys_proc_id = 0; - c[cpu].cpu_core_id = 0; + c->phys_proc_id = 0; + c->cpu_core_id = 0; cpu_clear(cpu, cpu_sibling_setup_map); } diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 91c7acc..72f4634 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -64,7 +64,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) switch (rio_devs[wpeg_num]->type){ case CompatWPEG: - /* The Compatability Winnipeg controls the 2 legacy buses, + /* The Compatibility Winnipeg controls the 2 legacy buses, * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case * a PCI-PCI bridge card is used in either slot: total 5 buses. */ diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c index 573c0a6..bc9f59c 100644 --- a/arch/x86/kernel/suspend_64.c +++ b/arch/x86/kernel/suspend_64.c @@ -32,9 +32,9 @@ void __save_processor_state(struct saved_context *ctxt) /* * descriptor tables */ - asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit)); - asm volatile ("sidt %0" : "=m" (ctxt->idt_limit)); - asm volatile ("str %0" : "=m" (ctxt->tr)); + store_gdt((struct desc_ptr *)&ctxt->gdt_limit); + store_idt((struct desc_ptr *)&ctxt->idt_limit); + store_tr(ctxt->tr); /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */ /* @@ -91,8 +91,9 @@ void __restore_processor_state(struct saved_context *ctxt) * now restore the descriptor tables to their proper values * ltr is done i fix_processor_context(). */ - asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit)); - asm volatile ("lidt %0" :: "m" (ctxt->idt_limit)); + load_gdt((const struct desc_ptr *)&ctxt->gdt_limit); + load_idt((const struct desc_ptr *)&ctxt->idt_limit); + /* * segment registers @@ -123,7 +124,7 @@ void fix_processor_context(void) int cpu = smp_processor_id(); struct tss_struct *t = &per_cpu(init_tss, cpu); - set_tss_desc(cpu,t); /* This just modifies memory; should not be neccessary. But... This is neccessary, because 386 hardware has concept of busy TSS or some similar stupidity. */ + set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... 
This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */ cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9; @@ -150,8 +151,22 @@ void fix_processor_context(void) /* Defined in arch/x86_64/kernel/suspend_asm.S */ extern int restore_image(void); +/* + * Address to jump to in the last phase of restore in order to get to the image + * kernel's text (this value is passed in the image header). + */ +unsigned long restore_jump_address; + +/* + * Value of the cr3 register from before the hibernation (this value is passed + * in the image header). + */ +unsigned long restore_cr3; + pgd_t *temp_level4_pgt; +void *relocated_restore_code; + static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) { long i, j; @@ -175,7 +190,7 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en if (paddr >= end) break; - pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr; + pe = __PAGE_KERNEL_LARGE_EXEC | paddr; pe &= __supported_pte_mask; set_pmd(pmd, __pmd(pe)); } @@ -183,25 +198,42 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en return 0; } +static int res_kernel_text_pud_init(pud_t *pud, unsigned long start) +{ + pmd_t *pmd; + unsigned long paddr; + + pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); + if (!pmd) + return -ENOMEM; + set_pud(pud + pud_index(start), __pud(__pa(pmd) | _KERNPG_TABLE)); + for (paddr = 0; paddr < KERNEL_TEXT_SIZE; pmd++, paddr += PMD_SIZE) { + unsigned long pe; + + pe = __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL | paddr; + pe &= __supported_pte_mask; + set_pmd(pmd, __pmd(pe)); + } + + return 0; +} + static int set_up_temporary_mappings(void) { unsigned long start, end, next; + pud_t *pud; int error; temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!temp_level4_pgt) return -ENOMEM; - /* It is safe to reuse the original kernel mapping */ - set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), - init_level4_pgt[pgd_index(__START_KERNEL_map)]); - /* Set up the direct mapping from scratch */ start = (unsigned long)pfn_to_kaddr(0); end = (unsigned long)pfn_to_kaddr(end_pfn); for (; start < end; start = next) { - pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); + pud = (pud_t *)get_safe_page(GFP_ATOMIC); if (!pud) return -ENOMEM; next = start + PGDIR_SIZE; @@ -212,7 +244,17 @@ static int set_up_temporary_mappings(void) set_pgd(temp_level4_pgt + pgd_index(start), mk_kernel_pgd(__pa(pud))); } - return 0; + + /* Set up the kernel text mapping from scratch */ + pud = (pud_t *)get_safe_page(GFP_ATOMIC); + if (!pud) + return -ENOMEM; + error = res_kernel_text_pud_init(pud, __START_KERNEL_map); + if (!error) + set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), + __pgd(__pa(pud) | _PAGE_TABLE)); + + return error; } int swsusp_arch_resume(void) @@ -222,6 +264,13 @@ int swsusp_arch_resume(void) /* We have got enough memory and from now on we cannot recover */ if ((error = set_up_temporary_mappings())) return error; + + relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); + if (!relocated_restore_code) + return -ENOMEM; + memcpy(relocated_restore_code, &core_restore_code, + &restore_registers - &core_restore_code); + restore_image(); return 0; } @@ -236,4 +285,43 @@ int pfn_is_nosave(unsigned long pfn) unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); } + +struct restore_data_record { + unsigned long jump_address; + unsigned long cr3; + unsigned long magic; +}; + +#define 
RESTORE_MAGIC 0x0123456789ABCDEFUL + +/** + * arch_hibernation_header_save - populate the architecture specific part + * of a hibernation image header + * @addr: address to save the data at + */ +int arch_hibernation_header_save(void *addr, unsigned int max_size) +{ + struct restore_data_record *rdr = addr; + + if (max_size < sizeof(struct restore_data_record)) + return -EOVERFLOW; + rdr->jump_address = restore_jump_address; + rdr->cr3 = restore_cr3; + rdr->magic = RESTORE_MAGIC; + return 0; +} + +/** + * arch_hibernation_header_restore - read the architecture specific data + * from the hibernation image header + * @addr: address to read the data from + */ +int arch_hibernation_header_restore(void *addr) +{ + struct restore_data_record *rdr = addr; + + restore_jump_address = rdr->jump_address; + restore_cr3 = rdr->cr3; + return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; +} #endif /* CONFIG_HIBERNATION */ diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S index 16d183f..48344b6 100644 --- a/arch/x86/kernel/suspend_asm_64.S +++ b/arch/x86/kernel/suspend_asm_64.S @@ -2,8 +2,8 @@ * * Distribute under GPLv2. * - * swsusp_arch_resume may not use any stack, nor any variable that is - * not "NoSave" during copying pages: + * swsusp_arch_resume must not use any stack or any nonlocal variables while + * copying pages: * * Its rewriting one kernel image with another. What is stack in "old" * image could very well be data page in "new" image, and overwriting @@ -36,6 +36,13 @@ ENTRY(swsusp_arch_suspend) movq %r15, saved_context_r15(%rip) pushfq ; popq saved_context_eflags(%rip) + /* save the address of restore_registers */ + movq $restore_registers, %rax + movq %rax, restore_jump_address(%rip) + /* save cr3 */ + movq %cr3, %rax + movq %rax, restore_cr3(%rip) + call swsusp_save ret @@ -54,7 +61,17 @@ ENTRY(restore_image) movq %rcx, %cr3; movq %rax, %cr4; # turn PGE back on + /* prepare to jump to the image kernel */ + movq restore_jump_address(%rip), %rax + movq restore_cr3(%rip), %rbx + + /* prepare to copy image data to their original locations */ movq restore_pblist(%rip), %rdx + movq relocated_restore_code(%rip), %rcx + jmpq *%rcx + + /* code below has been relocated to a safe page */ +ENTRY(core_restore_code) loop: testq %rdx, %rdx jz done @@ -62,7 +79,7 @@ loop: /* get addresses from the pbe and copy the page */ movq pbe_address(%rdx), %rsi movq pbe_orig_address(%rdx), %rdi - movq $512, %rcx + movq $(PAGE_SIZE >> 3), %rcx rep movsq @@ -70,10 +87,22 @@ loop: movq pbe_next(%rdx), %rdx jmp loop done: + /* jump to the restore_registers address from the image header */ + jmpq *%rax + /* + * NOTE: This assumes that the boot kernel's text mapping covers the + * image kernel's page containing restore_registers and the address of + * this page is the same as in the image kernel's text mapping (it + * should always be true, because the text mapping is linear, starting + * from 0, and is supposed to cover the entire kernel text for every + * kernel). 
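
The image-header handshake added above amounts to a save/verify round trip between the image kernel and the boot kernel; an illustrative check, not part of the patch:

/* Sketch: the boot kernel refuses to resume unless the magic written by
 * arch_hibernation_header_save() at suspend time reads back intact. */
static int sketch_header_round_trip(void)
{
	char buf[sizeof(struct restore_data_record)];
	int err;

	err = arch_hibernation_header_save(buf, sizeof(buf));
	if (err)
		return err;	/* -EOVERFLOW: record does not fit */

	/* returns -EINVAL if rdr->magic != RESTORE_MAGIC */
	return arch_hibernation_header_restore(buf);
}
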
+ * + * code below belongs to the image kernel + */ + +ENTRY(restore_registers) /* go back to the original page tables */ - movq $(init_level4_pgt - __START_KERNEL_map), %rax - addq phys_base(%rip), %rax - movq %rax, %cr3 + movq %rbx, %cr3 /* Flush TLB, including "global" things (vmalloc) */ movq mmu_cr4_features(%rip), %rax @@ -84,12 +113,9 @@ done: movq %rcx, %cr3 movq %rax, %cr4; # turn PGE back on - movl $24, %eax - movl %eax, %ds - movq saved_context_esp(%rip), %rsp movq saved_context_ebp(%rip), %rbp - /* Don't restore %rax, it must be 0 anyway */ + /* restore GPRs (we don't restore %rax, it must be 0 anyway) */ movq saved_context_ebx(%rip), %rbx movq saved_context_ecx(%rip), %rcx movq saved_context_edx(%rip), %rdx @@ -107,4 +133,7 @@ done: xorq %rax, %rax + /* tell the hibernation core that we've just restored the memory */ + movq %rax, in_suspend(%rip) + ret diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index b132d39..cc9acac 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c @@ -63,6 +63,9 @@ int panic_on_unrecovered_nmi; +DECLARE_BITMAP(used_vectors, NR_VECTORS); +EXPORT_SYMBOL_GPL(used_vectors); + asmlinkage int system_call(void); /* Do we ignore FPU interrupts ? */ @@ -288,48 +291,24 @@ EXPORT_SYMBOL(dump_stack); void show_registers(struct pt_regs *regs) { int i; - int in_kernel = 1; - unsigned long esp; - unsigned short ss, gs; - - esp = (unsigned long) (®s->esp); - savesegment(ss, ss); - savesegment(gs, gs); - if (user_mode_vm(regs)) { - in_kernel = 0; - esp = regs->esp; - ss = regs->xss & 0xffff; - } + print_modules(); - printk(KERN_EMERG "CPU: %d\n" - KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n" - KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n", - smp_processor_id(), 0xffff & regs->xcs, regs->eip, - print_tainted(), regs->eflags, init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip); - printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", - regs->eax, regs->ebx, regs->ecx, regs->edx); - printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", - regs->esi, regs->edi, regs->ebp, esp); - printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", - regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss); + __show_registers(regs, 0); printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", - TASK_COMM_LEN, current->comm, current->pid, + TASK_COMM_LEN, current->comm, task_pid_nr(current), current_thread_info(), current, task_thread_info(current)); /* * When in-kernel, we also print out the stack and code at the * time of the fault.. 
*/ - if (in_kernel) { + if (!user_mode_vm(regs)) { u8 *eip; unsigned int code_prologue = code_bytes * 43 / 64; unsigned int code_len = code_bytes; unsigned char c; printk("\n" KERN_EMERG "Stack: "); - show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); + show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG); printk(KERN_EMERG "Code: "); @@ -374,11 +353,11 @@ int is_valid_bugaddr(unsigned long eip) void die(const char * str, struct pt_regs * regs, long err) { static struct { - spinlock_t lock; + raw_spinlock_t lock; u32 lock_owner; int lock_owner_depth; } die = { - .lock = __SPIN_LOCK_UNLOCKED(die.lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED, .lock_owner = -1, .lock_owner_depth = 0 }; @@ -389,13 +368,14 @@ void die(const char * str, struct pt_regs * regs, long err) if (die.lock_owner != raw_smp_processor_id()) { console_verbose(); - spin_lock_irqsave(&die.lock, flags); + __raw_spin_lock(&die.lock); + raw_local_save_flags(flags); die.lock_owner = smp_processor_id(); die.lock_owner_depth = 0; bust_spinlocks(1); } else - local_save_flags(flags); + raw_local_save_flags(flags); if (++die.lock_owner_depth < 3) { unsigned long esp; @@ -439,7 +419,8 @@ void die(const char * str, struct pt_regs * regs, long err) bust_spinlocks(0); die.lock_owner = -1; add_taint(TAINT_DIE); - spin_unlock_irqrestore(&die.lock, flags); + __raw_spin_unlock(&die.lock); + raw_local_irq_restore(flags); if (!regs) return; @@ -622,7 +603,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs, printk_ratelimit()) printk(KERN_INFO "%s[%d] general protection eip:%lx esp:%lx error:%lx\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), regs->eip, regs->esp, error_code); force_sig(SIGSEGV, current); @@ -1142,6 +1123,8 @@ static void __init set_task_gate(unsigned int n, unsigned int gdt_entry) void __init trap_init(void) { + int i; + #ifdef CONFIG_EISA void __iomem *p = ioremap(0x0FFFD9, 4); if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) { @@ -1201,6 +1184,11 @@ void __init trap_init(void) set_system_gate(SYSCALL_VECTOR,&system_call); + /* Reserve all the builtin and the syscall vector. */ + for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) + set_bit(i, used_vectors); + set_bit(SYSCALL_VECTOR, used_vectors); + /* * Should be a barrier for any external CPU state. */ diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index b4a9b3d..d0c2bc7 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c @@ -201,7 +201,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, #define MSG(txt) ops->warning(data, txt) /* - * x86-64 can have upto three kernel stacks: + * x86-64 can have up to three kernel stacks: * process stack * interrupt stack * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack @@ -462,7 +462,7 @@ void out_of_line_bug(void) EXPORT_SYMBOL(out_of_line_bug); #endif -static DEFINE_SPINLOCK(die_lock); +static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; @@ -474,13 +474,13 @@ unsigned __kprobes long oops_begin(void) oops_enter(); /* racy, but better than risking deadlock. */ - local_irq_save(flags); + raw_local_irq_save(flags); cpu = smp_processor_id(); - if (!spin_trylock(&die_lock)) { + if (!__raw_spin_trylock(&die_lock)) { if (cpu == die_owner) /* nested oops. 
should stop eventually */; else - spin_lock(&die_lock); + __raw_spin_lock(&die_lock); } die_nest_count++; die_owner = cpu; @@ -494,12 +494,10 @@ void __kprobes oops_end(unsigned long flags) die_owner = -1; bust_spinlocks(0); die_nest_count--; - if (die_nest_count) - /* We still own the lock */ - local_irq_restore(flags); - else + if (!die_nest_count) /* Nest count reaches zero, release the lock. */ - spin_unlock_irqrestore(&die_lock, flags); + __raw_spin_unlock(&die_lock); + raw_local_irq_restore(flags); if (panic_on_oops) panic("Fatal exception"); oops_exit(); diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index e87a393..d78444c 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c @@ -59,7 +59,7 @@ int check_tsc_unstable(void) } EXPORT_SYMBOL_GPL(check_tsc_unstable); -/* Accellerators for sched_clock() +/* Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) * basic equation: * ns = cycles / (freq / ns_per_sec) @@ -74,7 +74,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable); * And since SC is a constant power of two, we can convert the div * into a shift. * - * We can use khz divisor instead of mhz to keep a better percision, since + * We can use khz divisor instead of mhz to keep a better precision, since * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. * (mathieu.desnoyers@polymtl.ca) * @@ -181,8 +181,8 @@ int recalibrate_cpu_khz(void) if (cpu_has_tsc) { cpu_khz = calculate_cpu_khz(); tsc_khz = cpu_khz; - cpu_data[0].loops_per_jiffy = - cpufreq_scale(cpu_data[0].loops_per_jiffy, + cpu_data(0).loops_per_jiffy = + cpufreq_scale(cpu_data(0).loops_per_jiffy, cpu_khz_old, cpu_khz); return 0; } else @@ -215,7 +215,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) return 0; } ref_freq = freq->old; - loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy; + loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy; cpu_khz_ref = cpu_khz; } @@ -223,7 +223,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || (val == CPUFREQ_RESUMECHANGE)) { if (!(freq->flags & CPUFREQ_CONST_LOOPS)) - cpu_data[freq->cpu].loops_per_jiffy = + cpu_data(freq->cpu).loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c index 9f22e54..9c70af4 100644 --- a/arch/x86/kernel/tsc_64.c +++ b/arch/x86/kernel/tsc_64.c @@ -73,13 +73,13 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, struct cpufreq_freqs *freq = data; unsigned long *lpj, dummy; - if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC)) + if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC)) return 0; lpj = &dummy; if (!(freq->flags & CPUFREQ_CONST_LOOPS)) #ifdef CONFIG_SMP - lpj = &cpu_data[freq->cpu].loops_per_jiffy; + lpj = &cpu_data(freq->cpu).loops_per_jiffy; #else lpj = &boot_cpu_data.loops_per_jiffy; #endif diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 8a67e28..ad4005c 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -48,12 +48,12 @@ ({unsigned long v; \ extern char __vsyscall_0; \ asm("" : "=r" (v) : "0" (x)); \ - ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); }) + ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); }) /* * vsyscall_gtod_data contains data that is : * - readonly from vsyscalls - * - writen by timer interrupt or systcl 
(/proc/sys/kernel/vsyscall64) + * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64) * Try to keep this structure as small as possible to avoid cache line ping pongs */ int __vgetcpu_mode __section_vgetcpu_mode; @@ -64,6 +64,16 @@ struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data = .sysctl_enabled = 1, }; +void update_vsyscall_tz(void) +{ + unsigned long flags; + + write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); + /* sys_tz has changed */ + vsyscall_gtod_data.sys_tz = sys_tz; + write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); +} + void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) { unsigned long flags; @@ -77,7 +87,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) vsyscall_gtod_data.clock.shift = clock->shift; vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; - vsyscall_gtod_data.sys_tz = sys_tz; vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); } @@ -163,7 +172,7 @@ time_t __vsyscall(1) vtime(time_t *t) if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) return time_syscall(t); - vgettimeofday(&tv, 0); + vgettimeofday(&tv, NULL); result = tv.tv_sec; if (t) *t = result; @@ -257,18 +266,10 @@ out: return ret; } -static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen, - void __user *oldval, size_t __user *oldlenp, - void __user *newval, size_t newlen) -{ - return -ENOSYS; -} - static ctl_table kernel_table2[] = { - { .ctl_name = 99, .procname = "vsyscall64", + { .procname = "vsyscall64", .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int), .mode = 0644, - .strategy = vsyscall_sysctl_nostrat, .proc_handler = vsyscall_sysctl_change }, {} }; @@ -290,7 +291,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu) #ifdef CONFIG_NUMA node = cpu_to_node(cpu); #endif - if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) + if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) write_rdtscp_aux((node << 12) | cpu); /* Store cpu number in limit so that it can be loaded quickly diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c index f6edb11..952e7a8 100644 --- a/arch/x86/lib/delay_32.c +++ b/arch/x86/lib/delay_32.c @@ -82,7 +82,7 @@ inline void __const_udelay(unsigned long xloops) __asm__("mull %0" :"=d" (xloops), "=&a" (d0) :"1" (xloops), "0" - (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))); + (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4))); __delay(++xloops); } diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c index 2dbebd3..0ebbfb9 100644 --- a/arch/x86/lib/delay_64.c +++ b/arch/x86/lib/delay_64.c @@ -40,7 +40,8 @@ EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { - __delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1); + __delay(((xloops * HZ + + cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1); } EXPORT_SYMBOL(__const_udelay); diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 9f38b12..8bab2b2 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -748,7 +748,7 @@ survive: retval = get_user_pages(current, current->mm, (unsigned long )to, 1, 1, 0, &pg, NULL); - if (retval == -ENOMEM && is_init(current)) { + if (retval == -ENOMEM && is_global_init(current)) { up_read(&current->mm->mmap_sem); congestion_wait(WRITE, HZ/50); goto survive; } diff --git 
a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index 3f08010..0c28a07 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c @@ -108,7 +108,7 @@ void __init time_init_hook(void) * mca_nmi_hook - hook into MCA specific NMI chain * * Description: - * The MCA (Microchannel Arcitecture) has an NMI chain for NMI sources + * The MCA (Microchannel Architecture) has an NMI chain for NMI sources * along the MCA bus. Use this to hook into that chain if you will need * it. **/ @@ -131,7 +131,7 @@ static __init int no_ipi_broadcast(char *str) return 1; } -__setup("no_ipi_broadcast", no_ipi_broadcast); +__setup("no_ipi_broadcast=", no_ipi_broadcast); static int __init print_ipi_mode(void) { diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 8685208..1af0cc7 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -1,5 +1,5 @@ /* - * Default generic APIC driver. This handles upto 8 CPUs. + * Default generic APIC driver. This handles up to 8 CPUs. */ #define APIC_DEFINITION 1 #include <linux/threads.h> diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index 4121d15..f410d3c 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -56,7 +56,7 @@ void __init generic_bigsmp_probe(void) /* * This routine is used to switch to bigsmp mode when * - There is no apic= option specified by the user - * - generic_apic_probe() has choosen apic_default as the sub_arch + * - generic_apic_probe() has chosen apic_default as the sub_arch * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support */ diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index e4928aa..361ac51 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -36,8 +36,8 @@ static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... 
NR /* per CPU data structure (for /proc/cpuinfo et al), visible externally * indexed physically */ -struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; -EXPORT_SYMBOL(cpu_data); +DEFINE_PER_CPU(cpuinfo_x86, cpu_info) __cacheline_aligned; +EXPORT_PER_CPU_SYMBOL(cpu_info); /* physical ID of the CPU used to boot the system */ unsigned char boot_cpu_id; @@ -389,7 +389,7 @@ find_smp_config(void) /* The boot CPU must be extended */ voyager_extended_vic_processors = 1<<boot_cpu_id; - /* initially, all of the first 8 cpu's can boot */ + /* initially, all of the first 8 CPUs can boot */ voyager_allowed_boot_processors = 0xff; /* set up everything for just this CPU, we can alter * this as we start the other CPUs later */ @@ -430,7 +430,7 @@ find_smp_config(void) void __init smp_store_cpu_info(int id) { - struct cpuinfo_x86 *c=&cpu_data[id]; + struct cpuinfo_x86 *c = &cpu_data(id); *c = boot_cpu_data; @@ -634,7 +634,7 @@ do_boot_cpu(__u8 cpu) cpu, smp_processor_id())); printk("CPU%d: ", cpu); - print_cpu_info(&cpu_data[cpu]); + print_cpu_info(&cpu_data(cpu)); wmb(); cpu_set(cpu, cpu_callout_map); cpu_set(cpu, cpu_present_map); @@ -683,7 +683,7 @@ smp_boot_cpus(void) */ smp_store_cpu_info(boot_cpu_id); printk("CPU%d: ", boot_cpu_id); - print_cpu_info(&cpu_data[boot_cpu_id]); + print_cpu_info(&cpu_data(boot_cpu_id)); if(is_cpu_quad()) { /* booting on a Quad CPU */ @@ -714,7 +714,7 @@ smp_boot_cpus(void) unsigned long bogosum = 0; for (i = 0; i < NR_CPUS; i++) if (cpu_isset(i, cpu_online_map)) - bogosum += cpu_data[i].loops_per_jiffy; + bogosum += cpu_data(i).loops_per_jiffy; printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", cpucount+1, bogosum/(500000/HZ), @@ -1010,7 +1010,7 @@ static struct call_data_struct * call_data; /* execute a thread on a new CPU. The function to be called must be * previously set up. This is used to schedule a function for - * execution on all CPU's - set up the function then broadcast a + * execution on all CPUs - set up the function then broadcast a * function_interrupt CPI to come here on each CPU */ static void smp_call_function_interrupt(void) @@ -1095,7 +1095,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask, * CPI here. We don't use this actually for counting so losing * ticks doesn't matter * - * FIXME: For those CPU's which actually have a local APIC, we could + * FIXME: For those CPUs which actually have a local APIC, we could * try to use it to trigger this interrupt instead of having to * broadcast the timer tick. Unfortunately, all my pentium DYADs have * no local APIC, so I can't do this @@ -1287,7 +1287,7 @@ smp_local_timer_interrupt(void) /* * We take the 'long' return path, and there every subsystem - * grabs the apropriate locks (kernel lock/ irq lock). + * grabs the appropriate locks (kernel lock/ irq lock). * * we might want to decouple profiling from the 'long path', * and do the profiling totally in assembly. @@ -1759,7 +1759,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask) real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; if(cpus_addr(mask)[0] == 0) - /* can't have no cpu's to accept the interrupt -- extremely + /* can't have no CPUs to accept the interrupt -- extremely * bad things will happen */ return; @@ -1791,7 +1791,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask) } /* this is magic, we now have the correct affinity maps, so * enable the interrupt. 
This will send an enable CPI to - * those cpu's who need to enable it in their local masks, + * those CPUs who need to enable it in their local masks, * causing them to correct for the new affinity . If the * interrupt is currently globally disabled, it will simply be * disabled again as it comes in (voyager lazy disable). If diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c index f9d5953..50f9366 100644 --- a/arch/x86/mach-voyager/voyager_thread.c +++ b/arch/x86/mach-voyager/voyager_thread.c @@ -64,7 +64,7 @@ check_from_kernel(void) { if(voyager_status.switch_off) { - /* FIXME: This should be configureable via proc */ + /* FIXME: This should be configurable via proc */ execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1"); } else if(voyager_status.power_fail) { VDEBUG(("Voyager daemon detected AC power failure\n")); diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c index 4de95a1..f14da2a 100644 --- a/arch/x86/mm/boot_ioremap_32.c +++ b/arch/x86/mm/boot_ioremap_32.c @@ -10,7 +10,7 @@ /* * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE - * keeps that from happenning. If anyone has a better way, I'm listening. + * keeps that from happening. If anyone has a better way, I'm listening. * * boot_pte_t is defined only if this all works correctly */ diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index 1389377..fe608a4 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c @@ -273,7 +273,7 @@ unsigned long __init setup_memory(void) * When mapping a NUMA machine we allocate the node_mem_map arrays * from node local memory. They are then mapped directly into KVA * between zone normal and vmalloc space. Calculate the size of - * this space and use it to adjust the boundry between ZONE_NORMAL + * this space and use it to adjust the boundary between ZONE_NORMAL * and ZONE_HIGHMEM. */ find_max_pfn(); diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c index 6555c3d..503dfc0 100644 --- a/arch/x86/mm/fault_32.c +++ b/arch/x86/mm/fault_32.c @@ -354,7 +354,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the - * kernel and should generate an OOPS. Unfortunatly, in the case of an + * kernel and should generate an OOPS. Unfortunately, in the case of an * erroneous fault occurring in a code path which already holds mmap_sem * we will deadlock attempting to validate the fault against the * address space. Luckily the kernel only validly references user @@ -362,7 +362,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, * exceptions table. * * As the vast majority of faults will be valid we will only perform - * the source reference check when there is a possibilty of a deadlock. + * the source reference check when there is a possibility of a deadlock. * Attempt to lock the address space, if we cannot we then validate the * source. If this is invalid we can skip the address space check, * thus avoiding the deadlock. @@ -471,8 +471,8 @@ bad_area_nosemaphore: printk_ratelimit()) { printk("%s%s[%d]: segfault at %08lx eip %08lx " "esp %08lx error %lx\n", - tsk->pid > 1 ? KERN_INFO : KERN_EMERG, - tsk->comm, tsk->pid, address, regs->eip, + task_pid_nr(tsk) > 1 ? 
KERN_INFO : KERN_EMERG, + tsk->comm, task_pid_nr(tsk), address, regs->eip, regs->esp, error_code); } tsk->thread.cr2 = address; @@ -564,7 +564,8 @@ no_context: * it's allocated already. */ if ((page >> PAGE_SHIFT) < max_low_pfn - && (page & _PAGE_PRESENT)) { + && (page & _PAGE_PRESENT) + && !(page & _PAGE_PSE)) { page &= PAGE_MASK; page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)]; @@ -587,7 +588,7 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(tsk)) { + if (is_global_init(tsk)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c index 5e0e549..644b4f7 100644 --- a/arch/x86/mm/fault_64.c +++ b/arch/x86/mm/fault_64.c @@ -169,7 +169,7 @@ void dump_pagetable(unsigned long address) pmd = pmd_offset(pud, address); if (bad_address(pmd)) goto bad; printk("PMD %lx ", pmd_val(*pmd)); - if (!pmd_present(*pmd)) goto ret; + if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; pte = pte_offset_kernel(pmd, address); if (bad_address(pte)) goto bad; @@ -285,7 +285,6 @@ static int vmalloc_fault(unsigned long address) return 0; } -static int page_fault_trace; int show_unhandled_signals = 1; /* @@ -354,10 +353,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, if (likely(regs->eflags & X86_EFLAGS_IF)) local_irq_enable(); - if (unlikely(page_fault_trace)) - printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n", - regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code); - if (unlikely(error_code & PF_RSVD)) pgtable_bad(address, regs, error_code); @@ -378,7 +373,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, again: /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the - * kernel and should generate an OOPS. Unfortunatly, in the case of an + * kernel and should generate an OOPS. Unfortunately, in the case of an * erroneous fault occurring in a code path which already holds mmap_sem * we will deadlock attempting to validate the fault against the * address space. Luckily the kernel only validly references user @@ -386,7 +381,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, * exceptions table. * * As the vast majority of faults will be valid we will only perform - * the source reference check when there is a possibilty of a deadlock. + * the source reference check when there is a possibility of a deadlock. * Attempt to lock the address space, if we cannot we then validate the * source. If this is invalid we can skip the address space check, * thus avoiding the deadlock. @@ -488,7 +483,7 @@ bad_area_nosemaphore: if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && printk_ratelimit()) { printk( - "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n", + "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n", tsk->pid > 1 ? 
KERN_INFO : KERN_EMERG, tsk->comm, tsk->pid, address, regs->rip, regs->rsp, error_code); @@ -554,7 +549,7 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); goto again; } @@ -621,10 +616,3 @@ void vmalloc_sync_all(void) BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK))); } - -static int __init enable_pagefaulttrace(char *str) -{ - page_fault_trace = 1; - return 1; -} -__setup("pagefaulttrace", enable_pagefaulttrace); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 5eec5e5..3d6926b 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -612,7 +612,7 @@ void __init init_cpu_to_node(void) { int i; for (i = 0; i < NR_CPUS; i++) { - u8 apicid = x86_cpu_to_apicid[i]; + u8 apicid = x86_cpu_to_apicid_init[i]; if (apicid == BAD_APICID) continue; if (apicid_to_node[apicid] == NUMA_NO_NODE) diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c index 8a4f65b..c7b7dfe 100644 --- a/arch/x86/mm/pageattr_64.c +++ b/arch/x86/mm/pageattr_64.c @@ -230,9 +230,14 @@ void global_flush_tlb(void) struct page *pg, *next; struct list_head l; - down_read(&init_mm.mmap_sem); + /* + * Write-protect the semaphore, to exclude two contexts + * doing a list_replace_init() call in parallel and to + * exclude new additions to the deferred_pages list: + */ + down_write(&init_mm.mmap_sem); list_replace_init(&deferred_pages, &l); - up_read(&init_mm.mmap_sem); + up_write(&init_mm.mmap_sem); flush_map(&l); diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 56089cc..ea85172 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -218,7 +218,7 @@ static inline int save_add_info(void) {return 0;} /* * Update nodes_add and decide whether to include the added area in the zone. * Both SPARSE and RESERVE need nodes_add information. - * This code supports one contigious hot add area per node. + * This code supports one contiguous hot add area per node. 
*/ static int reserve_hotadd(int node, unsigned long start, unsigned long end) { diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index c049ce4..0ed046a 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c @@ -13,25 +13,45 @@ #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/uaccess.h> +#include <asm/stacktrace.h> -struct frame_head { - struct frame_head * ebp; - unsigned long ret; -} __attribute__((packed)); +static void backtrace_warning_symbol(void *data, char *msg, + unsigned long symbol) +{ + /* Ignore warnings */ +} -static struct frame_head * -dump_kernel_backtrace(struct frame_head * head) +static void backtrace_warning(void *data, char *msg) { - oprofile_add_trace(head->ret); + /* Ignore warnings */ +} - /* frame pointers should strictly progress back up the stack - * (towards higher addresses) */ - if (head >= head->ebp) - return NULL; +static int backtrace_stack(void *data, char *name) +{ + /* Yes, we want all stacks */ + return 0; +} + +static void backtrace_address(void *data, unsigned long addr) +{ + unsigned int *depth = data; - return head->ebp; + if ((*depth)--) + oprofile_add_trace(addr); } +static struct stacktrace_ops backtrace_ops = { + .warning = backtrace_warning, + .warning_symbol = backtrace_warning_symbol, + .stack = backtrace_stack, + .address = backtrace_address, +}; + +struct frame_head { + struct frame_head *ebp; + unsigned long ret; +} __attribute__((packed)); + static struct frame_head * dump_user_backtrace(struct frame_head * head) { @@ -53,72 +73,16 @@ dump_user_backtrace(struct frame_head * head) return bufhead[0].ebp; } -/* - * | | /\ Higher addresses - * | | - * --------------- stack base (address of current_thread_info) - * | thread info | - * . . - * | stack | - * --------------- saved regs->ebp value if valid (frame_head address) - * . . - * --------------- saved regs->rsp value if x86_64 - * | | - * --------------- struct pt_regs * stored on stack if 32-bit - * | | - * . . - * | | - * --------------- %esp - * | | - * | | \/ Lower addresses - * - * Thus, regs (or regs->rsp for x86_64) <-> stack base restricts the - * valid(ish) ebp values. Note: (1) for x86_64, NMI and several other - * exceptions use special stacks, maintained by the interrupt stack table - * (IST). These stacks are set up in trap_init() in - * arch/x86_64/kernel/traps.c. Thus, for x86_64, regs now does not point - * to the kernel stack; instead, it points to some location on the NMI - * stack. On the other hand, regs->rsp is the stack pointer saved when the - * NMI occurred. (2) For 32-bit, regs->esp is not valid because the - * processor does not save %esp on the kernel stack when interrupts occur - * in the kernel mode. 
- */ -#ifdef CONFIG_FRAME_POINTER -static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs) -{ - unsigned long headaddr = (unsigned long)head; -#ifdef CONFIG_X86_64 - unsigned long stack = (unsigned long)regs->rsp; -#else - unsigned long stack = (unsigned long)regs; -#endif - unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE; - - return headaddr > stack && headaddr < stack_base; -} -#else -/* without fp, it's just junk */ -static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs) -{ - return 0; -} -#endif - - void x86_backtrace(struct pt_regs * const regs, unsigned int depth) { - struct frame_head *head; - -#ifdef CONFIG_X86_64 - head = (struct frame_head *)regs->rbp; -#else - head = (struct frame_head *)regs->ebp; -#endif + struct frame_head *head = (struct frame_head *)frame_pointer(regs); + unsigned long stack = stack_pointer(regs); if (!user_mode_vm(regs)) { - while (depth-- && valid_kernel_stack(head, regs)) - head = dump_kernel_backtrace(head); + if (depth) + dump_trace(NULL, regs, (unsigned long *)stack, + &backtrace_ops, &depth); return; } diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index abb1aa9..45b605f 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -29,7 +29,7 @@ struct op_msrs { struct pt_regs; /* The model vtable abstracts the differences between - * various x86 CPU model's perfctr support. + * various x86 CPU models' perfctr support. */ struct op_x86_model_spec { unsigned int const num_counters; diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index c52150f..88d8f5c 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -169,7 +169,7 @@ void eisa_set_level_irq(unsigned int irq) } /* - * Common IRQ routing practice: nybbles in config space, + * Common IRQ routing practice: nibbles in config space, * offset by some magic constant. */ static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr) @@ -585,7 +585,7 @@ static __init int via_router_probe(struct irq_router *r, /* FIXME: We should move some of the quirk fixup stuff here */ /* - * work arounds for some buggy BIOSes + * workarounds for some buggy BIOSes */ if (device == PCI_DEVICE_ID_VIA_82C586_0) { switch(router->device) { diff --git a/arch/x86_64/.gitignore b/arch/x86_64/.gitignore new file mode 100644 index 0000000..36ef4c3 --- /dev/null +++ b/arch/x86_64/.gitignore @@ -0,0 +1 @@ +boot diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 43fafe9..aab25f3 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -716,9 +716,16 @@ menu "Power management options" source kernel/power/Kconfig +config ARCH_HIBERNATION_HEADER + bool + depends on HIBERNATION + default y + source "drivers/acpi/Kconfig" -source "arch/x86/kernel/cpufreq/Kconfig" +source "arch/x86/kernel/cpu/cpufreq/Kconfig_64" + +source "drivers/cpuidle/Kconfig" endmenu @@ -761,9 +768,9 @@ source "fs/Kconfig.binfmt" config IA32_EMULATION bool "IA32 Emulation" help - Include code to run 32-bit programs under a 64-bit kernel. You should likely - turn this on, unless you're 100% sure that you don't have any 32-bit programs - left. + Include code to run 32-bit programs under a 64-bit kernel. You should + likely turn this on, unless you're 100% sure that you don't have any + 32-bit programs left. 
config IA32_AOUT tristate "IA32 a.out support" @@ -794,21 +801,6 @@ source "drivers/firmware/Kconfig" source fs/Kconfig -menu "Instrumentation Support" - -source "arch/x86/oprofile/Kconfig" - -config KPROBES - bool "Kprobes" - depends on KALLSYMS && MODULES - help - Kprobes allows you to trap at almost any kernel address and - execute a callback function. register_kprobe() establishes - a probepoint and specifies the callback. Kprobes is useful - for kernel debugging, non-intrusive instrumentation and testing. - If in doubt, say "N". -endmenu - source "arch/x86_64/Kconfig.debug" source "security/Kconfig" diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile index 03e1ede..6d89ab7 100644 --- a/arch/x86_64/Makefile +++ b/arch/x86_64/Makefile @@ -74,7 +74,7 @@ KBUILD_CFLAGS += $(cflags-y) CFLAGS_KERNEL += $(cflags-kernel-y) KBUILD_AFLAGS += -m64 -head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task_64.o +head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task.o libs-y += arch/x86/lib/ core-y += arch/x86/kernel/ \ @@ -97,9 +97,9 @@ BOOTIMAGE := arch/x86/boot/bzImage KBUILD_IMAGE := $(BOOTIMAGE) bzImage: vmlinux - $(Q)mkdir -p $(objtree)/arch/x86_64/boot - $(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage $(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE) + $(Q)mkdir -p $(objtree)/arch/x86_64/boot + $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage bzlilo: vmlinux $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zlilo diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 7fbb44b..85ffbb4 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -251,6 +251,8 @@ config EMBEDDED_RAMDISK_IMAGE provide one yourself. endmenu +source "kernel/Kconfig.instrumentation" + source "arch/xtensa/Kconfig.debug" source "security/Kconfig" diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile index 9c5185f..40aa55b 100644 --- a/arch/xtensa/boot/Makefile +++ b/arch/xtensa/boot/Makefile @@ -8,7 +8,8 @@ # -EXTRA_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include +# KBUILD_CFLAGS used when building rest of boot (takes effect recursively) +KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include HOSTFLAGS += -Iarch/$(ARCH)/boot/include BIG_ENDIAN := $(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#") diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 8be99c7..397bcd6 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -176,7 +176,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause) printk("Caught unhandled exception in '%s' " "(pid = %d, pc = %#010lx) - should not happen\n" "\tEXCCAUSE is %ld\n", - current->comm, current->pid, regs->pc, exccause); + current->comm, task_pid_nr(current), regs->pc, exccause); force_sig(SIGILL, current); } @@ -228,7 +228,7 @@ do_illegal_instruction(struct pt_regs *regs) /* If in user mode, send SIGILL signal to current process. 
*/ printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", - current->comm, current->pid, regs->pc); + current->comm, task_pid_nr(current), regs->pc); force_sig(SIGILL, current); } @@ -254,7 +254,7 @@ do_unaligned_user (struct pt_regs *regs) current->thread.error_code = -3; printk("Unaligned memory access to %08lx in '%s' " "(pid = %d, pc = %#010lx)\n", - regs->excvaddr, current->comm, current->pid, regs->pc); + regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 2f84285..33f366b 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -145,7 +145,7 @@ bad_area: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_init(current)) { + if (is_global_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; diff --git a/arch/xtensa/platform-iss/network.c b/arch/xtensa/platform-iss/network.c index f09962f..b61fb36 100644 --- a/arch/xtensa/platform-iss/network.c +++ b/arch/xtensa/platform-iss/network.c @@ -798,7 +798,7 @@ static int iss_net_setup(char *str) #undef ERR -__setup("eth", iss_net_setup); +__setup("eth=", iss_net_setup); /* * Initialize all ISS Ethernet devices previously registered in iss_net_setup. |