author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-16 14:37:10 +0200
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 20:46:30 +0200
commit		a4eaf7f14675cb512d69f0c928055e73d0c6d252 (patch)
tree		e8a0f631fc28d4bd9becd2e9e2c71743c64ee3ec /include/linux/perf_event.h
parent		fa407f35e0298d841e4088f95a7f9cf6e725c6d5 (diff)
perf: Rework the PMU methods
Replace pmu::{enable,disable,start,stop,unthrottle} with
pmu::{add,del,start,stop}, all of which take a flags argument.

The new interface extends the capability to stop a counter while
keeping it scheduled on the PMU. We replace the throttled state with
the generic stopped state.

This also allows us to efficiently stop/start counters over certain
code paths (like IRQ handlers).

It also allows scheduling a counter without it starting, allowing for
a generic frozen state (useful for rotating stopped counters).

The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state:

 1) We disable the counter:
    a) the pmu has per-counter enable bits, we flip that
    b) we program a NOP event, preserving the counter state

 2) We store the counter state and ignore all read/overflow events

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
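[Editor's note] To make the new contract concrete, here is a minimal sketch of
how an architecture driver might implement the four methods. This is
illustrative only, not code from the patch: every my_pmu_*() helper is a
hypothetical stand-in for the architecture's hardware accessors, while the
PERF_HES_* and PERF_EF_* flags are the ones the diff below introduces.

	#include <linux/kernel.h>
	#include <linux/perf_event.h>

	/* Hypothetical hardware accessors -- NOT part of the patch. */
	int  my_pmu_assign_slot(struct perf_event *event);
	void my_pmu_release_slot(struct perf_event *event);
	void my_pmu_enable_counter(struct perf_event *event);
	void my_pmu_disable_counter(struct perf_event *event);
	void my_pmu_program_period(struct perf_event *event);
	void my_pmu_update_count(struct perf_event *event);

	static void my_pmu_start(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;

		if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
			return;

		if (flags & PERF_EF_RELOAD) {
			/* Reprogramming only makes sense from a folded-up count. */
			WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
			my_pmu_program_period(event);
		}

		hwc->state = 0;
		my_pmu_enable_counter(event);	/* e.g. flip a per-counter enable bit */
	}

	static void my_pmu_stop(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;

		if (!(hwc->state & PERF_HES_STOPPED)) {
			my_pmu_disable_counter(event);
			hwc->state |= PERF_HES_STOPPED;
		}

		if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
			my_pmu_update_count(event);	/* fold hw value into event->count */
			hwc->state |= PERF_HES_UPTODATE;
		}
	}

	static int my_pmu_add(struct perf_event *event, int flags)
	{
		if (my_pmu_assign_slot(event) < 0)	/* the schedulability test */
			return -EAGAIN;

		/* Scheduled on the PMU, but not counting until ->start(). */
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		if (flags & PERF_EF_START)
			my_pmu_start(event, PERF_EF_RELOAD);

		return 0;
	}

	static void my_pmu_del(struct perf_event *event, int flags)
	{
		my_pmu_stop(event, PERF_EF_UPDATE);	/* fold the final count */
		my_pmu_release_slot(event);
	}

Keeping ->add() from starting the counter unless PERF_EF_START is set is what
gives the generic frozen state described above.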
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--	include/linux/perf_event.h | 54 +++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 41 insertions(+), 13 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8cafa15..402073c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -538,6 +538,7 @@ struct hw_perf_event {
};
#endif
};
+ int state;
local64_t prev_count;
u64 sample_period;
u64 last_period;
@@ -549,6 +550,13 @@ struct hw_perf_event {
#endif
};
+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH 0x04
+
struct perf_event;
/*
@@ -564,42 +572,62 @@ struct pmu {
int *pmu_disable_count;
+ /*
+ * Fully disable/enable this PMU, can be used to protect from the PMI
+ * as well as for lazy/batch writing of the MSRs.
+ */
void (*pmu_enable) (struct pmu *pmu); /* optional */
void (*pmu_disable) (struct pmu *pmu); /* optional */
/*
+ * Try and initialize the event for this PMU.
* Should return -ENOENT when the @event doesn't match this PMU.
*/
int (*event_init) (struct perf_event *event);
- int (*enable) (struct perf_event *event);
- void (*disable) (struct perf_event *event);
- int (*start) (struct perf_event *event);
- void (*stop) (struct perf_event *event);
+#define PERF_EF_START 0x01 /* start the counter when adding */
+#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
+#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
+
+ /*
+ * Adds/Removes a counter to/from the PMU, can be done inside
+ * a transaction, see the ->*_txn() methods.
+ */
+ int (*add) (struct perf_event *event, int flags);
+ void (*del) (struct perf_event *event, int flags);
+
+ /*
+ * Starts/Stops a counter present on the PMU. The PMI handler
+ * should stop the counter when perf_event_overflow() returns
+ * !0. ->start() will be used to continue.
+ */
+ void (*start) (struct perf_event *event, int flags);
+ void (*stop) (struct perf_event *event, int flags);
+
+ /*
+ * Updates the counter value of the event.
+ */
void (*read) (struct perf_event *event);
- void (*unthrottle) (struct perf_event *event);
/*
* Group events scheduling is treated as a transaction, add
* group events as a whole and perform one schedulability test.
* If the test fails, roll back the whole group
- */
-
- /*
- * Start the transaction, after this ->enable() doesn't need to
+ *
+ * Start the transaction, after this ->add() doesn't need to
* do schedulability tests.
*/
void (*start_txn) (struct pmu *pmu); /* optional */
/*
- * If ->start_txn() disabled the ->enable() schedulability test
+ * If ->start_txn() disabled the ->add() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
int (*commit_txn) (struct pmu *pmu); /* optional */
/*
- * Will cancel the transaction, assumes ->disable() is called
- * for each successfull ->enable() during the transaction.
+ * Will cancel the transaction, assumes ->del() is called
+ * for each successful ->add() during the transaction.
*/
void (*cancel_txn) (struct pmu *pmu); /* optional */
};
@@ -680,7 +708,7 @@ struct perf_event {
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
-	struct pmu		*pmu;
+	struct pmu			*pmu;
enum perf_event_active_state state;
unsigned int attach_state;
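[Editor's note] One consequence worth spelling out is the overflow path the
new ->start()/->stop() comment in the diff describes: instead of the removed
->unthrottle() method, a throttled counter is simply parked in the generic
stopped state. A hedged sketch of that handler pattern, reusing the
hypothetical my_pmu_stop() from the sketch above (note that
perf_event_overflow() still took an nmi argument at the time of this patch):

	static void my_pmu_handle_irq(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs)
	{
		/*
		 * A nonzero return means the core throttled the event: stop
		 * counting but leave the event scheduled on the PMU. The core
		 * later continues it via ->start() with PERF_EF_RELOAD,
		 * replacing the old ->unthrottle() machinery.
		 */
		if (perf_event_overflow(event, 1, data, regs))
			my_pmu_stop(event, 0);
	}

Because the counter stays scheduled while stopped, restarting it is just a
register write rather than a full reschedule, which is what makes the
stop/start cycle cheap on hot paths like IRQ handlers.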