author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-06-11 13:35:08 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-09-09 20:46:28 +0200 |
commit | b0a873ebbf87bf38bf70b5e39a7cadc96099fa13 (patch) | |
tree | 63ab672b847aed295f99b9b2a3bbcfd5d3d35bd9 /arch/arm/kernel/perf_event.c | |
parent | 51b0fe39549a04858001922919ab355dee9bdfcf (diff) | |
perf: Register PMU implementations
Simple registration interface for struct pmu; this provides the
infrastructure for removing all the weak functions. (A short sketch of
the resulting driver-side pattern follows the tags below.)
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
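
The registration interface described above boils down to the driver-side pattern shown in this sketch. It is not code from the patch: the `my_pmu_*` names are hypothetical, and only the `struct pmu` callbacks visible in the ARM diff below are referenced. A PMU exposes an `event_init` callback that returns `-ENOENT` for event types it does not handle, and registers itself once via the single-argument `perf_pmu_register()` used by this patch:

```c
/*
 * Minimal driver-side sketch of the new registration interface,
 * modelled on the ARM hunks below. The my_pmu_* names are
 * hypothetical; a real driver also fills in enable/disable/
 * unthrottle/read, as the ARM code does.
 */
#include <linux/init.h>
#include <linux/perf_event.h>

static int my_pmu_event_init(struct perf_event *event)
{
	/* Claim only the event types this PMU can service. */
	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;
	default:
		/* -ENOENT tells the core to try another registered pmu. */
		return -ENOENT;
	}

	/* Hardware-specific setup would go here. */
	return 0;
}

static struct pmu my_pmu = {
	.event_init = my_pmu_event_init,
	/* .enable, .disable, .unthrottle, .read omitted in this sketch */
};

static int __init my_pmu_init(void)
{
	/* Single-argument form, as used by this patch. */
	perf_pmu_register(&my_pmu);
	return 0;
}
arch_initcall(my_pmu_init);
```

The key change relative to the old weak `hw_perf_event_init()` hook is that event-type matching now lives in each pmu's `event_init()`, which is what lets the core stop relying on per-arch weak functions.
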
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r-- | arch/arm/kernel/perf_event.c | 38 |
1 file changed, 26 insertions, 12 deletions
```diff
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 0671e92..f62f9db 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -306,12 +306,7 @@ out:
 	return err;
 }
 
-static struct pmu pmu = {
-	.enable = armpmu_enable,
-	.disable = armpmu_disable,
-	.unthrottle = armpmu_unthrottle,
-	.read = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
@@ -491,20 +486,29 @@ __hw_perf_event_init(struct perf_event *event)
 	return err;
 }
 
-struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
 	int err = 0;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!armpmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	event->destroy = hw_perf_event_destroy;
 
 	if (!atomic_inc_not_zero(&active_events)) {
 		if (atomic_read(&active_events) > perf_max_events) {
 			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
+			return -ENOSPC;
 		}
 
 		mutex_lock(&pmu_reserve_mutex);
@@ -518,15 +522,23 @@ hw_perf_event_init(struct perf_event *event)
 	}
 
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	err = __hw_perf_event_init(event);
 	if (err)
 		hw_perf_event_destroy(event);
 
-	return err ? ERR_PTR(err) : &pmu;
+	return err;
 }
 
+static struct pmu pmu = {
+	.event_init = armpmu_event_init,
+	.enable = armpmu_enable,
+	.disable = armpmu_disable,
+	.unthrottle = armpmu_unthrottle,
+	.read = armpmu_read,
+};
+
 void
 hw_perf_enable(void)
 {
@@ -2994,6 +3006,8 @@ init_hw_perf_events(void)
 		perf_max_events = -1;
 	}
 
+	perf_pmu_register(&pmu);
+
 	return 0;
 }
 arch_initcall(init_hw_perf_events);
```
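
For context on how the registration is consumed, the sketch below shows the general idea on the core side: each registered pmu's `event_init()` is tried in turn, and `-ENOENT` means "not mine, keep looking" (exactly the value the ARM `default:` case above returns). This is a simplified illustration rather than the core code from this series; the `pmus` list head, the `entry` member, and the `find_pmu_for_event()` name are assumptions of the sketch.

```c
/*
 * Simplified core-side illustration (not the actual kernel code from
 * this series): walk the registered PMUs and let each event_init()
 * either claim the event, reject it with -ENOENT, or fail hard.
 * The 'pmus' list and the 'entry' member are assumptions of this sketch.
 */
#include <linux/err.h>
#include <linux/list.h>
#include <linux/perf_event.h>

static LIST_HEAD(pmus);

static struct pmu *find_pmu_for_event(struct perf_event *event)
{
	struct pmu *pmu;

	list_for_each_entry(pmu, &pmus, entry) {
		int err = pmu->event_init(event);

		if (!err)
			return pmu;		/* this PMU accepted the event */
		if (err != -ENOENT)
			return ERR_PTR(err);	/* real error, stop searching */
	}

	return ERR_PTR(-ENOENT);		/* no PMU recognised the event */
}
```
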