author     Simon Arlott <simon@fire.lp0.eu>       2007-05-11 14:55:43 -0700
committer  Tony Luck <tony.luck@intel.com>        2007-05-11 14:55:43 -0700
commit     72fdbdce3d52282f8ea95f512e871791256754e6 (patch)
tree       b7d544875c5d89e10859f3e5dc97e2e064a00e54  /arch/ia64/kernel/perfmon.c
parent     0a3fd051c7036ef71b58863f8e5da7c3dabd9d3f (diff)
[IA64] spelling fixes: arch/ia64/
Spelling and apostrophe fixes in arch/ia64/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel/perfmon.c')
-rw-r--r--  arch/ia64/kernel/perfmon.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index e7191ca..b7133ca 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1318,7 +1318,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
unsigned long flags;
/*
- * validy checks on cpu_mask have been done upstream
+ * validity checks on cpu_mask have been done upstream
*/
LOCK_PFS(flags);
@@ -1384,7 +1384,7 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
unsigned long flags;
/*
- * validy checks on cpu_mask have been done upstream
+ * validity checks on cpu_mask have been done upstream
*/
LOCK_PFS(flags);
@@ -1835,7 +1835,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
/*
* remove our file from the async queue, if we use this mode.
* This can be done without the context being protected. We come
- * here when the context has become unreacheable by other tasks.
+ * here when the context has become unreachable by other tasks.
*
* We may still have active monitoring at this point and we may
* end up in pfm_overflow_handler(). However, fasync_helper()
@@ -2132,7 +2132,7 @@ doit:
filp->private_data = NULL;
/*
- * if we free on the spot, the context is now completely unreacheable
+ * if we free on the spot, the context is now completely unreachable
* from the callers side. The monitored task side is also cut, so we
* can freely cut.
*
@@ -2562,7 +2562,7 @@ pfm_reset_pmu_state(pfm_context_t *ctx)
ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
/*
- * bitmask of all PMDs that are accesible to this context
+ * bitmask of all PMDs that are accessible to this context
*/
ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
@@ -3395,7 +3395,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
/*
* we can only read the register that we use. That includes
- * the one we explicitely initialize AND the one we want included
+ * the one we explicitly initialize AND the one we want included
* in the sampling buffer (smpl_regs).
*
* Having this restriction allows optimization in the ctxsw routine
@@ -3715,7 +3715,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* if non-blocking, then we ensure that the task will go into
* pfm_handle_work() before returning to user mode.
*
- * We cannot explicitely reset another task, it MUST always
+ * We cannot explicitly reset another task, it MUST always
* be done by the task itself. This works for system wide because
* the tool that is controlling the session is logically doing
* "self-monitoring".
@@ -4644,7 +4644,7 @@ pfm_exit_thread(struct task_struct *task)
switch(state) {
case PFM_CTX_UNLOADED:
/*
- * only comes to thios function if pfm_context is not NULL, i.e., cannot
+ * only comes to this function if pfm_context is not NULL, i.e., cannot
* be in unloaded state
*/
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
@@ -5247,7 +5247,7 @@ pfm_end_notify_user(pfm_context_t *ctx)
/*
* main overflow processing routine.
- * it can be called from the interrupt path or explicitely during the context switch code
+ * it can be called from the interrupt path or explicitly during the context switch code
*/
static void
pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)