Merge branch 'for-linus-1' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[firefly-linux-kernel-4.4.55.git] include/linux/perf_event.h
index a503100388cccaabc0bee0101bdda8514046d932..61992cf2e9771699ee06595c8fbb1bd39633018a 100644
@@ -53,6 +53,7 @@ struct perf_guest_info_callbacks {
 #include <linux/sysfs.h>
 #include <linux/perf_regs.h>
 #include <linux/workqueue.h>
+#include <linux/cgroup.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -118,10 +119,19 @@ struct hw_perf_event {
                        struct hrtimer  hrtimer;
                };
                struct { /* tracepoint */
-                       struct task_struct      *tp_target;
                        /* for tp_event->class */
                        struct list_head        tp_list;
                };
+               struct { /* intel_cqm */
+                       int                     cqm_state;
+                       int                     cqm_rmid;
+                       struct list_head        cqm_events_entry;
+                       struct list_head        cqm_groups_entry;
+                       struct list_head        cqm_group_entry;
+               };
+               struct { /* itrace */
+                       int                     itrace_started;
+               };
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
@@ -129,12 +139,12 @@ struct hw_perf_event {
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
-                       struct task_struct              *bp_target;
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                };
 #endif
        };
+       struct task_struct              *target;
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
@@ -166,6 +176,11 @@ struct perf_event;
  * pmu::capabilities flags
  */
 #define PERF_PMU_CAP_NO_INTERRUPT              0x01
+#define PERF_PMU_CAP_NO_NMI                    0x02
+#define PERF_PMU_CAP_AUX_NO_SG                 0x04
+#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF          0x08
+#define PERF_PMU_CAP_EXCLUSIVE                 0x10
+#define PERF_PMU_CAP_ITRACE                    0x20
 
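These capability bits are OR'ed into pmu::capabilities by the driver. As a minimal sketch (the pmu below and its omitted callbacks are hypothetical, not part of this patch), an instruction-trace PMU that cannot scatter-gather its AUX buffer might advertise:

	static struct pmu my_trace_pmu = {
		.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE,
		/* event_init/add/del/start/stop/read callbacks omitted */
	};
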
 /**
  * struct pmu - generic performance monitoring unit
@@ -186,6 +201,7 @@ struct pmu {
 
        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
+       atomic_t                        exclusive_cnt; /* < 0: cpu; > 0: tsk */
        int                             task_ctx_nr;
        int                             hrtimer_interval_ms;
 
@@ -271,6 +287,23 @@ struct pmu {
         */
        size_t                          task_ctx_size;
 
+
+       /*
+        * Return the count value for a counter.
+        */
+       u64 (*count)                    (struct perf_event *event); /*optional*/
+
+       /*
+        * Set up pmu-private data structures for an AUX area
+        */
+       void *(*setup_aux)              (int cpu, void **pages,
+                                        int nr_pages, bool overwrite);
+                                       /* optional */
+
+       /*
+        * Free pmu-private AUX data structures
+        */
+       void (*free_aux)                (void *aux); /* optional */
 };
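The AUX callbacks are only declared here; a rough sketch of a driver filling them in (struct my_buf and its field layout are assumptions for illustration, not part of this patch; kzalloc/kfree need <linux/slab.h>):

	struct my_buf {				/* hypothetical per-event AUX state */
		void		**pages;	/* pages handed in by the ring buffer */
		int		nr_pages;
		bool		snapshot;	/* overwrite (snapshot) mode? */
	};

	static void *my_pmu_setup_aux(int cpu, void **pages, int nr_pages,
				      bool overwrite)
	{
		struct my_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

		if (!buf)
			return NULL;

		buf->pages	= pages;
		buf->nr_pages	= nr_pages;
		buf->snapshot	= overwrite;

		return buf;	/* later retrievable via perf_get_aux() */
	}

	static void my_pmu_free_aux(void *aux)
	{
		kfree(aux);
	}
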
 
 /**
@@ -445,6 +478,7 @@ struct perf_event {
        struct pid_namespace            *ns;
        u64                             id;
 
+       u64                             (*clock)(void);
        perf_overflow_handler_t         overflow_handler;
        void                            *overflow_handler_context;
 
@@ -543,12 +577,52 @@ struct perf_output_handle {
        struct ring_buffer              *rb;
        unsigned long                   wakeup;
        unsigned long                   size;
-       void                            *addr;
+       union {
+               void                    *addr;
+               unsigned long           head;
+       };
        int                             page;
 };
 
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+       u64                             time;
+       u64                             timestamp;
+};
+
+struct perf_cgroup {
+       struct cgroup_subsys_state      css;
+       struct perf_cgroup_info __percpu *info;
+};
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+       return container_of(task_css(task, perf_event_cgrp_id),
+                           struct perf_cgroup, css);
+}
+#endif /* CONFIG_CGROUP_PERF */
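A minimal usage sketch, assuming the caller already pinned the css as the comment requires and runs with preemption disabled (the update function itself is hypothetical):

	static void my_update_cgrp_time(struct task_struct *task, u64 now)
	{
		struct perf_cgroup *cgrp = perf_cgroup_from_task(task);
		struct perf_cgroup_info *info = this_cpu_ptr(cgrp->info);

		info->time	+= now - info->timestamp;
		info->timestamp	 = now;
	}
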
+
 #ifdef CONFIG_PERF_EVENTS
 
+extern void *perf_aux_output_begin(struct perf_output_handle *handle,
+                                  struct perf_event *event);
+extern void perf_aux_output_end(struct perf_output_handle *handle,
+                               unsigned long size, bool truncated);
+extern int perf_aux_output_skip(struct perf_output_handle *handle,
+                               unsigned long size);
+extern void *perf_get_aux(struct perf_output_handle *handle);
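A hedged sketch of how a PMU handler would bracket one chunk of trace data with these calls (the drain function and my_hw_read_trace() are assumptions, not part of this patch):

	static void my_pmu_drain(struct perf_event *event,
				 struct perf_output_handle *handle)
	{
		unsigned long size;
		void *buf;

		/* returns the pmu-private AUX state from setup_aux(), or NULL */
		buf = perf_aux_output_begin(handle, event);
		if (!buf)
			return;

		size = my_hw_read_trace(buf);		/* hypothetical copy-out */

		/* account the bytes written; not truncated in this path */
		perf_aux_output_end(handle, size, false);
	}
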
+
 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
 extern void perf_pmu_unregister(struct pmu *pmu);
 
@@ -740,6 +814,11 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
                __perf_event_task_sched_out(prev, next);
 }
 
+static inline u64 __perf_event_count(struct perf_event *event)
+{
+       return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
+
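This helper pairs with the new optional pmu::count callback above: readers prefer the callback when a PMU (e.g. intel_cqm) supplies one and fall back to the local plus child counts otherwise. Roughly (the exact wrapper lives in kernel/events/core.c and may differ):

	static u64 perf_event_count(struct perf_event *event)
	{
		if (event->pmu->count)
			return event->pmu->count(event);

		return __perf_event_count(event);
	}
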
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -814,6 +893,11 @@ static inline bool needs_branch_stack(struct perf_event *event)
        return event->attr.branch_sample_type != 0;
 }
 
+static inline bool has_aux(struct perf_event *event)
+{
+       return event->pmu->setup_aux;
+}
+
 extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
 extern void perf_output_end(struct perf_output_handle *handle);
@@ -829,6 +913,17 @@ extern void perf_event_disable(struct perf_event *event);
 extern int __perf_event_disable(void *info);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
+static inline void *
+perf_aux_output_begin(struct perf_output_handle *handle,
+                     struct perf_event *event)                         { return NULL; }
+static inline void
+perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+                   bool truncated)                                     { }
+static inline int
+perf_aux_output_skip(struct perf_output_handle *handle,
+                    unsigned long size)                                { return -EINVAL; }
+static inline void *
+perf_get_aux(struct perf_output_handle *handle)                                { return NULL; }
 static inline void
 perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }