* See also PERF_RECORD_MISC_EXACT_IP
*/
precise_ip : 2, /* skid constraint */
+ mmap_data : 1, /* non-exec mmap data */
- __reserved_1 : 47;
+ __reserved_1 : 46;
union {
__u32 wakeup_events; /* wakeup every n events */
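/*
 * Illustrative userspace sketch, not part of the patch: opening a counter
 * that sets the new mmap_data bit together with precise_ip. Assumes a
 * kernel built with this header; error handling is left to the caller.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_cycles_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	= sizeof(attr);
	attr.type	= PERF_TYPE_HARDWARE;
	attr.config	= PERF_COUNT_HW_CPU_CYCLES;
	attr.precise_ip	= 2;	/* request 0 skid; the PMU may refuse */
	attr.mmap_data	= 1;	/* also log non-exec mmap()s */

	/* current task, any CPU, no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}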
#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
+# include <asm/local64.h>
#endif
struct perf_guest_info_callbacks {
	int (*is_in_guest)(void);
	int (*is_user_mode)(void);
	unsigned long (*get_guest_ip)(void);
};

	/* in struct hw_perf_event's union: */
	struct { /* software */
		struct hrtimer hrtimer;
	};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- /* breakpoint */
- struct arch_hw_breakpoint info;
+ struct { /* breakpoint */
+ struct arch_hw_breakpoint info;
+ struct list_head bp_list;
+ };
#endif
};
- atomic64_t prev_count;
+ local64_t prev_count;
u64 sample_period;
u64 last_period;
- atomic64_t period_left;
+ local64_t period_left;
u64 interrupts;
u64 freq_time_stamp;
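/*
 * Why the fields above moved from atomic64_t to local64_t: they are only
 * touched by the CPU the event runs on, so the cheaper cpu-local ops are
 * enough. A minimal sketch of the hot-path update, loosely modeled on
 * x86_perf_event_update(); 'new_raw' would come from reading the counter.
 */
static void hw_perf_event_update_sketch(struct hw_perf_event *hwc, u64 new_raw)
{
	u64 prev;

	do {
		prev = local64_read(&hwc->prev_count);
	} while (local64_cmpxchg(&hwc->prev_count, prev, new_raw) != prev);

	local64_sub(new_raw - prev, &hwc->period_left);
}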
struct perf_event;
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
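/*
 * Sketch of the pmu side, modeled on the x86 driver of this series:
 * start_txn sets the flag in per-cpu state and ->enable() skips its
 * schedulability test while it is set. 'cpu_hw_events'/'group_flag'
 * are the x86 names; other pmus keep equivalent state.
 */
static void pmu_start_txn_sketch(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->group_flag |= PERF_EVENT_TXN;
}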
/**
* struct pmu - generic performance monitoring unit
void (*unthrottle) (struct perf_event *event);
/*
- * group events scheduling is treated as a transaction,
- * add group events as a whole and perform one schedulability test.
- * If test fails, roll back the whole group
+ * Group event scheduling is treated as a transaction: add the
+ * group's events as a whole and perform one schedulability test.
+ * If the test fails, roll back the whole group.
*/
- void (*start_txn) (const struct pmu *pmu);
- void (*cancel_txn) (const struct pmu *pmu);
- int (*commit_txn) (const struct pmu *pmu);
+ /*
+ * Start the transaction; after this, ->enable() doesn't need
+ * to do schedulability tests.
+ */
+ void (*start_txn) (struct pmu *pmu);
+ /*
+ * If ->start_txn() disabled the ->enable() schedulability test,
+ * then ->commit_txn() is required to perform one. On success
+ * the transaction is closed. On error the transaction is kept
+ * open until ->cancel_txn() is called.
+ */
+ int (*commit_txn) (struct pmu *pmu);
+ /*
+ * Cancels the transaction; assumes ->disable() is called for
+ * each successful ->enable() during the transaction.
+ */
+ void (*cancel_txn) (struct pmu *pmu);
};
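/*
 * Caller-side sketch of the transaction protocol documented above; the
 * real group_sched_in() in kernel/perf_event.c has this shape.
 * event_sched_in() stands in for the core helper, and the unwinding of
 * already-enabled siblings is abbreviated.
 */
static int group_sched_in_sketch(struct perf_event *group, struct pmu *pmu)
{
	struct perf_event *event;

	pmu->start_txn(pmu);	/* ->enable() may now skip its own test */

	if (event_sched_in(group))
		goto fail;

	list_for_each_entry(event, &group->sibling_list, group_entry)
		if (event_sched_in(event))
			goto fail;

	if (!pmu->commit_txn(pmu))	/* one test for the whole group */
		return 0;
fail:
	/* the real code first ->disable()s every event it had enabled */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}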
/**
struct file;
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE 0x01
+
+struct perf_buffer {
atomic_t refcount;
struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
- const struct pmu *pmu;
+ struct pmu *pmu;
enum perf_event_active_state state;
unsigned int attach_state;
- atomic64_t count;
+ local64_t count;
 atomic64_t child_count;
/*
* These are the total time in nanoseconds that the event
atomic_t mmap_count;
int mmap_locked;
struct user_struct *mmap_user;
- struct perf_mmap_data *data;
+ struct perf_buffer *buffer;
/* poll related */
wait_queue_head_t waitq;
struct rcu_head rcu_head;
};
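/*
 * Sketch of how the local64_t/atomic64_t split above is read back: the
 * cpu-local count plus the atomic total of all children. This mirrors
 * the perf_event_count() helper in kernel/perf_event.c.
 */
static u64 perf_event_count_sketch(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}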
+/*
+ * Number of contexts where an event can trigger:
+ * task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS 4
+
/**
* struct perf_event_cpu_context - per cpu event context structure
*/
struct mutex hlist_mutex;
int hlist_refcount;
- /*
- * Recursion avoidance:
- *
- * task, softirq, irq, nmi context
- */
- int recursion[4];
+ /* Recursion avoidance in each context */
+ int recursion[PERF_NR_CONTEXTS];
};
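/*
 * Sketch of how the recursion[] slots above are used, modeled on
 * perf_swevent_get_recursion_context(): each execution context gets its
 * own slot, so an NMI hitting while a task-context event is being
 * processed is still counted instead of being dropped as recursion.
 */
static int get_recursion_context_sketch(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;	/* already handling an event in this context */

	recursion[rctx]++;
	barrier();
	return rctx;
}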
struct perf_output_handle {
struct perf_event *event;
- struct perf_mmap_data *data;
+ struct perf_buffer *buffer;
unsigned long wakeup;
unsigned long size;
void *addr;
*/
extern int perf_max_events;
-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+extern struct pmu *hw_perf_event_init(struct perf_event *event);
extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
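/*
 * Illustrative arch-side override, not part of this hunk: an architecture
 * defines the macro in its <asm/perf_event.h> so the empty stub above
 * compiles out. Roughly the x86 version; the pt_regs field names are x86's.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)	{		\
	(regs)->ip = (__ip);					\
	(regs)->bp = (unsigned long)__builtin_frame_address(0);\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
}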
/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
- unsigned long ip;
-
memset(regs, 0, sizeof(*regs));
- switch (skip) {
- case 1 :
- ip = CALLER_ADDR0;
- break;
- case 2 :
- ip = CALLER_ADDR1;
- break;
- case 3 :
- ip = CALLER_ADDR2;
- break;
- case 4:
- ip = CALLER_ADDR3;
- break;
- /* No need to support further for now */
- default:
- ip = 0;
- }
-
- return perf_arch_fetch_caller_regs(regs, ip, skip);
+ perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
static inline void
struct pt_regs hot_regs;
if (!regs) {
- perf_fetch_caller_regs(&hot_regs, 1);
+ perf_fetch_caller_regs(&hot_regs);
regs = &hot_regs;
}
__perf_sw_event(event_id, nr, nmi, regs, addr);
}
}
-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & VM_EXEC)
- __perf_event_mmap(vma);
-}
-
+extern void perf_event_mmap(struct vm_area_struct *vma);
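/*
 * Why the VM_EXEC filter left the inline wrapper: with attr.mmap_data set,
 * non-executable mappings are interesting too, so the per-event filtering
 * now happens in the core. A sketch of that check, shaped like
 * perf_event_mmap_match() in kernel/perf_event.c:
 */
static int perf_event_mmap_match_sketch(struct perf_event *event, int exec)
{
	if (exec)
		return event->attr.mmap;

	return event->attr.mmap_data;
}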
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs);
+
+static inline void
+perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
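/*
 * Sketch of how an architecture uses the hooks above, shaped after the
 * x86 implementation: mark the context, store the interrupted ip, then
 * walk the stack feeding each return address to perf_callchain_store().
 */
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
	perf_callchain_store(entry, regs->ip);	/* x86 field name */

	/* arch stack walker elided; it calls perf_callchain_store()
	 * once per kernel return address it finds */
}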
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs,
- struct hlist_head *head);
+ struct hlist_head *head, int rctx);
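/*
 * Sketch of the calling convention behind the new rctx parameter: the
 * tracepoint glue takes the swevent recursion context itself and passes
 * it down, so perf_tp_event() can release it when it is done.
 */
static void tp_perf_handler_sketch(void *record, int entry_size, u64 count,
				   struct pt_regs *regs,
				   struct hlist_head *head)
{
	int rctx = perf_swevent_get_recursion_context();

	if (rctx < 0)
		return;

	perf_tp_event(0, count, record, entry_size, regs, head, rctx);
	/* perf_tp_event() ends with perf_swevent_put_recursion_context(rctx) */
}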
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
#define perf_cpu_notifier(fn) \
do { \
static struct notifier_block fn##_nb __cpuinitdata = \
- { .notifier_call = fn, .priority = 20 }; \
+ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
(void *)(unsigned long)smp_processor_id()); \
fn(&fn##_nb, (unsigned long)CPU_STARTING, \