author     Simon Arlott <simon@fire.lp0.eu>        2007-10-20 01:13:56 +0200
committer  Adrian Bunk <bunk@kernel.org>           2007-10-20 01:13:56 +0200
commit     27b46d7661dc720224813eb4f452e424f1bf3a9a (patch)
tree       1683daefc5f245efa5a1c2a3808277b45d21ce72 /arch
parent     5e71c6051585da46b898b21bd8e5b6df2795f03f (diff)
spelling fixes: arch/i386/
Spelling fixes in arch/i386/.
Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Diffstat (limited to 'arch')
37 files changed, 57 insertions, 57 deletions
diff --git a/arch/x86/boot/compressed/misc_32.c b/arch/x86/boot/compressed/misc_32.c
index b28505c..1dc1e19 100644
--- a/arch/x86/boot/compressed/misc_32.c
+++ b/arch/x86/boot/compressed/misc_32.c
@@ -25,7 +25,7 @@
 /*
  * Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analized.
+ * Worst case behaviours need to be analyzed.
  * Background information:
  *
  * The file layout is:
@@ -94,7 +94,7 @@
  * Adding 32768 instead of 32767 just makes for round numbers.
  * Adding the decompressor_size is necessary as it musht live after all
  * of the data as well. Last I measured the decompressor is about 14K.
- * 10K of actuall data and 4K of bss.
+ * 10K of actual data and 4K of bss.
  *
  */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index afd2afe..25337f2 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -99,7 +99,7 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 /*
  * The default interrupt routing model is PIC (8259). This gets
- * overriden if IOAPICs are enumerated (below).
+ * overridden if IOAPICs are enumerated (below).
  */
 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
@@ -414,8 +414,8 @@ acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end
  *
  * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
  * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
- * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
- * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
+ * ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
+ * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
  */
 void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
@@ -427,7 +427,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 old = inb(0x4d0) | (inb(0x4d1) << 8);
 /*
- * If we use ACPI to set PCI irq's, then we should clear ELCR
+ * If we use ACPI to set PCI IRQs, then we should clear ELCR
  * since we will set it correctly as we enable the PCI irq
  * routing.
  */
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 793341f..08b07c1 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -947,7 +947,7 @@ void __devinit setup_local_APIC(void)
  * Set up LVT0, LVT1:
  *
  * set up through-local-APIC on the BP's LINT0. This is not
- * strictly necessery in pure symmetric-IO mode, but sometimes
+ * strictly necessary in pure symmetric-IO mode, but sometimes
  * we delegate interrupts to the 8259A.
  */
 /*
@@ -998,7 +998,7 @@ void __devinit setup_local_APIC(void)
 } else {
 if (esr_disable)
 /*
- * Something untraceble is creating bad interrupts on
+ * Something untraceable is creating bad interrupts on
  * secondary quads ... for the moment, just leave the
  * ESR disabled - we can't do anything useful with the
  * errors anyway - mbligh
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 32f2365..17089a0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -57,7 +57,7 @@
  * screen-blanking and gpm (Stephen Rothwell); Linux 1.99.4
  * 1.2a:Simple change to stop mysterious bug reports with SMP also added
  * levels to the printk calls. APM is not defined for SMP machines.
- * The new replacment for it is, but Linux doesn't yet support this.
+ * The new replacement for it is, but Linux doesn't yet support this.
  * Alan Cox Linux 2.1.55
  * 1.3: Set up a valid data descriptor 0x40 for buggy BIOS's
  * 1.4: Upgraded to support APM 1.2. Integrated ThinkPad suspend patch by
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5f8af87..1ff88c7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -266,7 +266,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_HT
 /*
  * On a AMD multi core setup the lower bits of the APIC id
- * distingush the cores.
+ * distinguish the cores.
  */
 if (c->x86_max_cores > 1) {
 int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 473eac8..9681fa1 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -53,7 +53,7 @@ static u32 __cpuinit ramtop(void) /* 16388 */
 continue;
 /*
  * Don't MCR over reserved space. Ignore the ISA hole
- * we frob around that catastrophy already
+ * we frob around that catastrophe already
  */
 if (e820.map[i].type == E820_RESERVED)
@@ -287,7 +287,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 c->x86_capability[5] = cpuid_edx(0xC0000001);
 }
- /* Cyrix III family needs CX8 & PGE explicity enabled. */
+ /* Cyrix III family needs CX8 & PGE explicitly enabled. */
 if (c->x86_model >=6 && c->x86_model <= 9) {
 rdmsr (MSR_VIA_FCR, lo, hi);
 lo |= (1<<1 | 1<<7);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d506201..e2fcf20 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -207,7 +207,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 static int __init x86_fxsr_setup(char * s)
 {
- /* Tell all the other CPU's to not use it... */
+ /* Tell all the other CPUs to not use it... */
 disable_x86_fxsr = 1;
 /*
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 32f0bda..f03e9153 100644
--- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -260,7 +260,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
 freqs.old = nforce2_get(policy->cpu);
 freqs.new = target_fsb * fid * 100;
- freqs.cpu = 0; /* Only one CPU on nForce2 plattforms */
+ freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
 if (freqs.old == freqs.new)
 return 0;
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
index ed2bda1..2ed7db2 100644
--- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
@@ -12,12 +12,12 @@
  * of any nature resulting due to the use of this software. This
  * software is provided AS-IS with no warranties.
  *
- * Theoritical note:
+ * Theoretical note:
  *
  * (see Geode(tm) CS5530 manual (rev.4.1) page.56)
  *
  * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0
- * are based on Suspend Moduration.
+ * are based on Suspend Modulation.
  *
  * Suspend Modulation works by asserting and de-asserting the SUSP# pin
  * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
@@ -101,11 +101,11 @@
 /* SUSCFG bits */
 #define SUSMOD (1<<0) /* enable/disable suspend modulation */
-/* the belows support only with cs5530 (after rev.1.2)/cs5530A */
+/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */
 #define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
 /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
 #define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
-/* the belows support only with cs5530A */
+/* the below is supported only with cs5530A */
 #define PWRSVE_ISA (1<<3) /* stop ISA clock */
 #define PWRSVE (1<<4) /* active idle */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c06ac68..9c36a53 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -168,7 +168,7 @@ static void count_off_irt(struct powernow_k8_data *data)
 return;
 }
-/* the voltage stabalization time */
+/* the voltage stabilization time */
 static void count_off_vst(struct powernow_k8_data *data)
 {
 udelay(data->vstable * VST_UNITS_20US);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index b06c812..7c4f6e0 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -148,10 +148,10 @@ struct powernow_k8_data {
 #define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
 #define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
-#define VST_UNITS_20US 20 /* Voltage Stabalization Time is in units of 20us */
+#define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */
 /*
- * Most values of interest are enocoded in a single field of the _PSS
+ * Most values of interest are encoded in a single field of the _PSS
  * entries: the "control" value.
  */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 4aa2ff8..88d66fb 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -256,7 +256,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 u32 vendor, device;
 /* It isn't really a PCI quirk directly, but the cure is the same.
    The MediaGX has deep magic SMM stuff that handles the
-   SB emulation. It thows away the fifo on disable_dma() which
+   SB emulation. It throws away the fifo on disable_dma() which
    is wrong and ruins the audio.
    Bug2: VSA1 has a wrap bug so that using maximum sized DMA
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 2287d48..9964be3 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -147,10 +147,10 @@ static void prepare_set(void)
 write_cr0(cr0);
 wbinvd();
- /* Cyrix ARRs - everything else were excluded at the top */
+ /* Cyrix ARRs - everything else was excluded at the top */
 ccr3 = getCx86(CX86_CCR3);
- /* Cyrix ARRs - everything else were excluded at the top */
+ /* Cyrix ARRs - everything else was excluded at the top */
 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
 }
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 56f64e3..992f08d 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -182,7 +182,7 @@ static inline void k8_enable_fixed_iorrs(void)
 /**
  * Checks and updates an fixed-range MTRR if it differs from the value it
- * should have. If K8 extenstions are wanted, update the K8 SYSCFG MSR also.
+ * should have. If K8 extentions are wanted, update the K8 SYSCFG MSR also.
  * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
  * \param msr MSR address of the MTTR which should be checked and updated
  * \param changed pointer which indicates whether the MTRR needed to be changed
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 5e4be30..9abbdf7 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -748,7 +748,7 @@ static int __init mtrr_init_finialize(void)
 if (use_intel())
 mtrr_state_warn();
 else {
- /* The CPUs haven't MTRR and seemes not support SMP. They have
+ /* The CPUs haven't MTRR and seem to not support SMP. They have
  * specific drivers, we use a tricky method to support
  * suspend/resume for them.
  * TBD: is there any system with such CPU which supports
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index d58039e..58fd54e 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -706,7 +706,7 @@ void __init e820_register_memory(void)
 int i;
 /*
- * Search for the bigest gap in the low 32 bits of the e820
+ * Search for the biggest gap in the low 32 bits of the e820
  * memory space.
  */
 last = 0x100000000ull;
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f836707..772afab 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -350,7 +350,7 @@ static int hpet_clocksource_register(void)
  *
  * hpet period is in femto seconds per cycle
  * so we need to convert this to ns/cyc units
- * aproximated by mult/2^shift
+ * approximated by mult/2^shift
  *
  * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
  * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 5cc8841..a42c807 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -86,7 +86,7 @@ static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
  * On UP the PIT can serve all of the possible timer functions. On SMP systems
  * it can be solely used for the global tick.
  *
- * The profiling and update capabilites are switched off once the local apic is
+ * The profiling and update capabilities are switched off once the local apic is
  * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
  * !using_apic_timer decisions in do_timer_interrupt_hook()
  */
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 5f10c71..03e88fc 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -584,7 +584,7 @@ tryanotherirq:
 imbalance = move_this_load;
- /* For physical_balance case, we accumlated both load
+ /* For physical_balance case, we accumulated both load
  * values in the one of the siblings cpu_irq[],
  * to use the same code for physical and logical processors
  * as much as possible.
@@ -2472,7 +2472,7 @@ void destroy_irq(unsigned int irq)
 }
 /*
- * MSI mesage composition
+ * MSI message composition
  */
 #ifdef CONFIG_PCI_MSI
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c
index 13abb4e..7a05a7f 100644
--- a/arch/x86/kernel/mpparse_32.c
+++ b/arch/x86/kernel/mpparse_32.c
@@ -1001,7 +1001,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 /*
  * Use the default configuration for the IRQs 0-15. Unless
- * overriden by (MADT) interrupt source override entries.
+ * overridden by (MADT) interrupt source override entries.
  */
 for (i = 0; i < 16; i++) {
 int idx;
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
index 99102ec..ff5431c 100644
--- a/arch/x86/kernel/ptrace_32.c
+++ b/arch/x86/kernel/ptrace_32.c
@@ -632,7 +632,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
 /* User-mode eip? */
 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
- /* Send us the fakey SIGTRAP */
+ /* Send us the fake SIGTRAP */
 force_sig_info(SIGTRAP, &info, tsk);
 }
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 978dc01..18e6eaf 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -624,7 +624,7 @@ void __init setup_arch(char **cmdline_p)
 /*
  * NOTE: before this point _nobody_ is allowed to allocate
  * any memory using the bootmem allocator. Although the
- * alloctor is now initialised only the first 8Mb of the kernel
+ * allocator is now initialised only the first 8Mb of the kernel
  * virtual address space has been mapped. All allocations before
  * paging_init() has completed must use the alloc_bootmem_low_pages()
  * variant (which allocates DMA'able memory) and care must be taken
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 6dc394b..9bdd830 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -594,7 +594,7 @@ static void fastcall do_signal(struct pt_regs *regs)
 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 if (signr > 0) {
- /* Reenable any watchpoints before delivering the
+ /* Re-enable any watchpoints before delivering the
  * signal to user space. The processor register will
  * have been cleared if the watchpoint triggered
  * inside the kernel.
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 791d9f8..9ced828 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -69,7 +69,7 @@
  *
  * B stepping CPUs may hang. There are hardware work arounds
  * for this. We warn about it in case your board doesn't have the work
- * arounds. Basically thats so I can tell anyone with a B stepping
+ * arounds. Basically that's so I can tell anyone with a B stepping
  * CPU and SMP problems "tough".
  *
  * Specific items [From Pentium Processor Specification Update]
@@ -273,7 +273,7 @@ void leave_mm(unsigned long cpu)
  * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
  * Stop ipi delivery for the old mm. This is not synchronized with
  * the other cpus, but smp_invalidate_interrupt ignore flush ipis
- * for the wrong mm, and in the worst case we perform a superflous
+ * for the wrong mm, and in the worst case we perform a superfluous
  * tlb flush.
  * 1a2) set cpu_tlbstate to TLBSTATE_OK
  * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index be3faac..1b9ee68 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -412,7 +412,7 @@ static void __cpuinit start_secondary(void *unused)
 /*
  * We need to hold call_lock, so there is no inconsistency
  * between the time smp_call_function() determines number of
- * IPI receipients, and the time when the determination is made
+ * IPI recipients, and the time when the determination is made
  * for which cpus receive the IPI. Holding this
  * lock helps us to not include this cpu in a currently in progress
  * smp_call_function().
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 91c7acc..72f4634 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -64,7 +64,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
 switch (rio_devs[wpeg_num]->type){
 case CompatWPEG:
- /* The Compatability Winnipeg controls the 2 legacy buses,
+ /* The Compatibility Winnipeg controls the 2 legacy buses,
  * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
  * a PCI-PCI bridge card is used in either slot: total 5 buses.
  */
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e87a393..cb19df7 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -59,7 +59,7 @@ int check_tsc_unstable(void)
 }
 EXPORT_SYMBOL_GPL(check_tsc_unstable);
-/* Accellerators for sched_clock()
+/* Accelerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
  * ns = cycles / (freq / ns_per_sec)
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index 47207ca..0c28a07 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -108,7 +108,7 @@ void __init time_init_hook(void)
  * mca_nmi_hook - hook into MCA specific NMI chain
  *
  * Description:
- * The MCA (Microchannel Arcitecture) has an NMI chain for NMI sources
+ * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
  * along the MCA bus. Use this to hook into that chain if you will need
  * it.
 **/
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
index 8685208..1af0cc7 100644
--- a/arch/x86/mach-generic/default.c
+++ b/arch/x86/mach-generic/default.c
@@ -1,5 +1,5 @@
 /*
- * Default generic APIC driver. This handles upto 8 CPUs.
+ * Default generic APIC driver. This handles up to 8 CPUs.
  */
 #define APIC_DEFINITION 1
 #include <linux/threads.h>
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
index 4121d15..f410d3c 100644
--- a/arch/x86/mach-generic/probe.c
+++ b/arch/x86/mach-generic/probe.c
@@ -56,7 +56,7 @@ void __init generic_bigsmp_probe(void)
 /*
  * This routine is used to switch to bigsmp mode when
  * - There is no apic= option specified by the user
- * - generic_apic_probe() has choosen apic_default as the sub_arch
+ * - generic_apic_probe() has chosen apic_default as the sub_arch
  * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
  */
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index e4928aa..c5f2692 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -389,7 +389,7 @@ find_smp_config(void)
 /* The boot CPU must be extended */
 voyager_extended_vic_processors = 1<<boot_cpu_id;
- /* initially, all of the first 8 cpu's can boot */
+ /* initially, all of the first 8 CPUs can boot */
 voyager_allowed_boot_processors = 0xff;
 /* set up everything for just this CPU, we can alter
  * this as we start the other CPUs later */
@@ -1010,7 +1010,7 @@ static struct call_data_struct * call_data;
 /* execute a thread on a new CPU. The function to be called must be
  * previously set up. This is used to schedule a function for
- * execution on all CPU's - set up the function then broadcast a
+ * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void smp_call_function_interrupt(void)
@@ -1095,7 +1095,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
  * CPI here. We don't use this actually for counting so losing
  * ticks doesn't matter
  *
- * FIXME: For those CPU's which actually have a local APIC, we could
+ * FIXME: For those CPUs which actually have a local APIC, we could
  * try to use it to trigger this interrupt instead of having to
  * broadcast the timer tick. Unfortunately, all my pentium DYADs have
  * no local APIC, so I can't do this
@@ -1287,7 +1287,7 @@ smp_local_timer_interrupt(void)
 /*
  * We take the 'long' return path, and there every subsystem
- * grabs the apropriate locks (kernel lock/ irq lock).
+ * grabs the appropriate locks (kernel lock/ irq lock).
  *
  * we might want to decouple profiling from the 'long path',
  * and do the profiling totally in assembly.
@@ -1759,7 +1759,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
 if(cpus_addr(mask)[0] == 0)
- /* can't have no cpu's to accept the interrupt -- extremely
+ /* can't have no CPUs to accept the interrupt -- extremely
  * bad things will happen */
 return;
@@ -1791,7 +1791,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 }
 /* this is magic, we now have the correct affinity maps, so
  * enable the interrupt. This will send an enable CPI to
- * those cpu's who need to enable it in their local masks,
+ * those CPUs who need to enable it in their local masks,
  * causing them to correct for the new affinity . If the
  * interrupt is currently globally disabled, it will simply be
  * disabled again as it comes in (voyager lazy disable). If
diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c
index f9d5953..50f9366 100644
--- a/arch/x86/mach-voyager/voyager_thread.c
+++ b/arch/x86/mach-voyager/voyager_thread.c
@@ -64,7 +64,7 @@ check_from_kernel(void)
 {
 if(voyager_status.switch_off) {
- /* FIXME: This should be configureable via proc */
+ /* FIXME: This should be configurable via proc */
 execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
 } else if(voyager_status.power_fail) {
 VDEBUG(("Voyager daemon detected AC power failure\n"));
diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
index 4de95a1..f14da2a 100644
--- a/arch/x86/mm/boot_ioremap_32.c
+++ b/arch/x86/mm/boot_ioremap_32.c
@@ -10,7 +10,7 @@
 /*
  * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
- * keeps that from happenning. If anyone has a better way, I'm listening.
+ * keeps that from happening. If anyone has a better way, I'm listening.
  *
  * boot_pte_t is defined only if this all works correctly
 */
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 1389377..fe608a4 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -273,7 +273,7 @@ unsigned long __init setup_memory(void)
  * When mapping a NUMA machine we allocate the node_mem_map arrays
  * from node local memory. They are then mapped directly into KVA
  * between zone normal and vmalloc space. Calculate the size of
- * this space and use it to adjust the boundry between ZONE_NORMAL
+ * this space and use it to adjust the boundary between ZONE_NORMAL
  * and ZONE_HIGHMEM.
  */
 find_max_pfn();
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 4d3e538..d618159 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -354,7 +354,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 /* When running in the kernel we expect faults to occur only to
  * addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
  * erroneous fault occurring in a code path which already holds mmap_sem
  * we will deadlock attempting to validate the fault against the
  * address space. Luckily the kernel only validly references user
@@ -362,7 +362,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
  * exceptions table.
  *
  * As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
  * Attempt to lock the address space, if we cannot we then validate the
  * source. If this is invalid we can skip the address space check,
  * thus avoiding the deadlock.
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index abb1aa9..45b605f 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -29,7 +29,7 @@ struct op_msrs {
 struct pt_regs;
 /* The model vtable abstracts the differences between
- * various x86 CPU model's perfctr support.
+ * various x86 CPU models' perfctr support.
  */
 struct op_x86_model_spec {
 unsigned int const num_counters;
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index c52150f..88d8f5c 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -169,7 +169,7 @@ void eisa_set_level_irq(unsigned int irq)
 }
 /*
- * Common IRQ routing practice: nybbles in config space,
+ * Common IRQ routing practice: nibbles in config space,
  * offset by some magic constant.
  */
 static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
@@ -585,7 +585,7 @@ static __init int via_router_probe(struct irq_router *r,
 /* FIXME: We should move some of the quirk fixup stuff here */
 /*
- * work arounds for some buggy BIOSes
+ * workarounds for some buggy BIOSes
 */
 if (device == PCI_DEVICE_ID_VIA_82C586_0) {
 switch(router->device) {