sed -ie 's/const struct pmu\>/struct pmu/g' `git grep -l "const struct pmu\>"`
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-static const struct pmu pmu = {
+static struct pmu pmu = {
.enable = alpha_pmu_enable,
.disable = alpha_pmu_disable,
.read = alpha_pmu_read,
.enable = alpha_pmu_enable,
.disable = alpha_pmu_disable,
.read = alpha_pmu_read,
/*
* Main entry point to initialise a HW performance event.
*/
/*
* Main entry point to initialise a HW performance event.
*/
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
hw_perf_event_init(struct perf_event *event)
{
int err = 0;
hw_perf_event_init(struct perf_event *event)
{
int err = 0;
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-void power_pmu_start_txn(const struct pmu *pmu)
+void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-void power_pmu_cancel_txn(const struct pmu *pmu)
+void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
* Perform the group schedulability test as a whole
* Return 0 if success
*/
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-int power_pmu_commit_txn(const struct pmu *pmu)
+int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
{
struct cpu_hw_events *cpuhw;
long i, n;
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
{
u64 ev;
unsigned long flags;
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
sh_perf_event_update(event, &event->hw, event->hw.idx);
}
sh_perf_event_update(event, &event->hw, event->hw.idx);
}
-static const struct pmu pmu = {
+static struct pmu pmu = {
.enable = sh_pmu_enable,
.disable = sh_pmu_disable,
.read = sh_pmu_read,
};
.enable = sh_pmu_enable,
.disable = sh_pmu_disable,
.read = sh_pmu_read,
};
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
{
int err = __hw_perf_event_init(event);
if (unlikely(err)) {
{
int err = __hw_perf_event_init(event);
if (unlikely(err)) {
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
* Perform the group schedulability test as a whole
* Return 0 if success
*/
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n;
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n;
-static const struct pmu pmu = {
+static struct pmu pmu = {
.enable = sparc_pmu_enable,
.disable = sparc_pmu_disable,
.read = sparc_pmu_read,
.enable = sparc_pmu_enable,
.disable = sparc_pmu_disable,
.read = sparc_pmu_read,
.commit_txn = sparc_pmu_commit_txn,
};
.commit_txn = sparc_pmu_commit_txn,
};
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
{
int err = __hw_perf_event_init(event);
{
int err = __hw_perf_event_init(event);
-static const struct pmu pmu;
static inline int is_x86_event(struct perf_event *event)
{
static inline int is_x86_event(struct perf_event *event)
{
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-static void x86_pmu_start_txn(const struct pmu *pmu)
+static void x86_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-static void x86_pmu_cancel_txn(const struct pmu *pmu)
+static void x86_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
* Perform the group schedulability test as a whole
* Return 0 if success
*/
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-static int x86_pmu_commit_txn(const struct pmu *pmu)
+static int x86_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int assign[X86_PMC_IDX_MAX];
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int assign[X86_PMC_IDX_MAX];
-static const struct pmu pmu = {
+static struct pmu pmu = {
.enable = x86_pmu_enable,
.disable = x86_pmu_disable,
.start = x86_pmu_start,
.enable = x86_pmu_enable,
.disable = x86_pmu_disable,
.start = x86_pmu_start,
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
int err;
err = __hw_perf_event_init(event);
int err;
err = __hw_perf_event_init(event);
* Start the transaction, after this ->enable() doesn't need
* to do schedulability tests.
*/
* Start the transaction, after this ->enable() doesn't need
* to do schedulability tests.
*/
- void (*start_txn) (const struct pmu *pmu);
+ void (*start_txn) (struct pmu *pmu);
/*
* If ->start_txn() disabled the ->enable() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
/*
* If ->start_txn() disabled the ->enable() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
- int (*commit_txn) (const struct pmu *pmu);
+ int (*commit_txn) (struct pmu *pmu);
/*
* Will cancel the transaction, assumes ->disable() is called for
 * each successful ->enable() during the transaction.
*/
/*
* Will cancel the transaction, assumes ->disable() is called for
 * each successful ->enable() during the transaction.
*/
- void (*cancel_txn) (const struct pmu *pmu);
+ void (*cancel_txn) (struct pmu *pmu);
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
enum perf_event_active_state state;
unsigned int attach_state;
enum perf_event_active_state state;
unsigned int attach_state;
*/
extern int perf_max_events;
*/
extern int perf_max_events;
-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+extern struct pmu *hw_perf_event_init(struct perf_event *event);
extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
/*
* Architecture provided APIs - weak aliases:
*/
/*
* Architecture provided APIs - weak aliases:
*/
-extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
+extern __weak struct pmu *hw_perf_event_init(struct perf_event *event)
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
- const struct pmu *pmu = group_event->pmu;
+ struct pmu *pmu = group_event->pmu;
bool txn = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
bool txn = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
-static const struct pmu perf_ops_generic = {
+static struct pmu perf_ops_generic = {
.enable = perf_swevent_enable,
.disable = perf_swevent_disable,
.start = perf_swevent_int,
.enable = perf_swevent_enable,
.disable = perf_swevent_disable,
.start = perf_swevent_int,
cpu_clock_perf_event_update(event);
}
cpu_clock_perf_event_update(event);
}
-static const struct pmu perf_ops_cpu_clock = {
+static struct pmu perf_ops_cpu_clock = {
.enable = cpu_clock_perf_event_enable,
.disable = cpu_clock_perf_event_disable,
.read = cpu_clock_perf_event_read,
.enable = cpu_clock_perf_event_enable,
.disable = cpu_clock_perf_event_disable,
.read = cpu_clock_perf_event_read,
task_clock_perf_event_update(event, time);
}
task_clock_perf_event_update(event, time);
}
-static const struct pmu perf_ops_task_clock = {
+static struct pmu perf_ops_task_clock = {
.enable = task_clock_perf_event_enable,
.disable = task_clock_perf_event_disable,
.read = task_clock_perf_event_read,
.enable = task_clock_perf_event_enable,
.disable = task_clock_perf_event_disable,
.read = task_clock_perf_event_read,
#ifdef CONFIG_EVENT_TRACING
#ifdef CONFIG_EVENT_TRACING
-static const struct pmu perf_ops_tracepoint = {
+static struct pmu perf_ops_tracepoint = {
.enable = perf_trace_enable,
.disable = perf_trace_disable,
.start = perf_swevent_int,
.enable = perf_trace_enable,
.disable = perf_trace_disable,
.start = perf_swevent_int,
perf_trace_destroy(event);
}
perf_trace_destroy(event);
}
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static struct pmu *tp_perf_event_init(struct perf_event *event)
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static struct pmu *tp_perf_event_init(struct perf_event *event)
release_bp_slot(event);
}
release_bp_slot(event);
}
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+static struct pmu *bp_perf_event_init(struct perf_event *bp)
perf_swevent_add(bp, 1, 1, &sample, regs);
}
#else
perf_swevent_add(bp, 1, 1, &sample, regs);
}
#else
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+static struct pmu *bp_perf_event_init(struct perf_event *bp)
swevent_hlist_put(event);
}
swevent_hlist_put(event);
}
-static const struct pmu *sw_perf_event_init(struct perf_event *event)
+static struct pmu *sw_perf_event_init(struct perf_event *event)
- const struct pmu *pmu = NULL;
+ struct pmu *pmu = NULL;
u64 event_id = event->attr.config;
/*
u64 event_id = event->attr.config;
/*
perf_overflow_handler_t overflow_handler,
gfp_t gfpflags)
{
perf_overflow_handler_t overflow_handler,
gfp_t gfpflags)
{
struct perf_event *event;
struct hw_perf_event *hwc;
long err;
struct perf_event *event;
struct hw_perf_event *hwc;
long err;