author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-09-18 20:14:01 +0200
committer  Ingo Molnar <mingo@elte.hu>                2009-09-18 20:47:30 +0200
commit     def0a9b2573e00ab0b486cb5382625203ab4c4a6 (patch)
tree       1e3086fc320c244297b5b63cce47065bcfb71e8c /kernel/perf_counter.c
parent     cf450a7355a116af793998c118a6bcf7f5a8367e (diff)
sched_clock: Make it NMI safe
Arjan complained about the suckyness of TSC on modern machines, and asked if we could do something about that for PERF_SAMPLE_TIME.

Make cpu_clock() NMI safe by removing the spinlock and using cmpxchg. This also makes it smaller and more robust.

Affects architectures that use HAVE_UNSTABLE_SCHED_CLOCK, i.e. IA64 and x86.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
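A minimal sketch of the lockless pattern the message describes, assuming a cmpxchg-based retry loop replaces the spinlock around the per-CPU clock state; the actual change lives in kernel/sched_clock.c and is not part of this diff, and the example_* names below are illustrative only:

#include <linux/kernel.h>       /* max() */
#include <linux/types.h>        /* u64 */
#include <asm/cmpxchg.h>        /* cmpxchg64(), provided by the arch headers */

/* Illustrative clock state; the real code keeps this per CPU. */
static u64 example_clock_last;

static u64 example_update_clock(u64 now)
{
        u64 old, clock;

        do {
                old = example_clock_last;
                /* keep the clock monotonic even if an NMI races with us */
                clock = max(old, now);
        } while (cmpxchg64(&example_clock_last, old, clock) != old);

        return clock;
}

If an NMI fires between the read of example_clock_last and the cmpxchg64(), the compare fails and the loop retries with the freshly published value, so no lock is taken and the path is safe to enter from NMI context.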
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c  |  9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 6944bd5..06d233a 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2955,10 +2955,7 @@ void perf_prepare_sample(struct perf_event_header *header,
         }
 
         if (sample_type & PERF_SAMPLE_TIME) {
-                /*
-                 * Maybe do better on x86 and provide cpu_clock_nmi()
-                 */
-                data->time = sched_clock();
+                data->time = perf_clock();
 
                 header->size += sizeof(data->time);
         }
@@ -3488,7 +3485,7 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
                         .misc = 0,
                         .size = sizeof(throttle_event),
                 },
-                .time = sched_clock(),
+                .time = perf_clock(),
                 .id = primary_counter_id(counter),
                 .stream_id = counter->id,
         };
@@ -3540,7 +3537,7 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
         }
 
         if (counter->attr.freq) {
-                u64 now = sched_clock();
+                u64 now = perf_clock();
                 s64 delta = now - hwc->freq_stamp;
 
                 hwc->freq_stamp = now;
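For reference, perf_clock() itself is defined elsewhere in kernel/perf_counter.c and does not appear in these hunks; it is presumed to be a thin wrapper over cpu_clock(), the function this series makes NMI safe:

/* Presumed helper (not shown in this diff): forwards to cpu_clock(),
 * which is NMI safe after this change. */
static inline u64 perf_clock(void)
{
        return cpu_clock(smp_processor_id());
}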