#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

#define RING_BUFFER_WRITABLE		0x01
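
/*
 * One ring_buffer instance backs a perf mmap() region: the
 * perf_event_mmap_page control page followed by a power-of-two number of
 * data pages shared with user space, plus an optional AUX area.
 */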
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
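
/*
 * page_order() is non-zero only for vmalloc-backed buffers, where each
 * data_pages[] slot covers PAGE_SIZE << page_order() bytes; the size
 * helpers below therefore work for both layouts.
 */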
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
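
/*
 * A memcpy_func passed to DEFINE_OUTPUT_COPY() must return the number of
 * bytes it failed to copy (0 on complete success), mirroring the
 * copy_from_user() convention.  The generated func_name() advances the
 * output handle across data page boundaries and returns how many bytes of
 * @buf remain uncopied.
 */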
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
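
/*
 * __output_copy(): copy sample data that already lives in kernel memory.
 * memcpy() cannot fail, so memcpy_common() always reports zero bytes missed.
 */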
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
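
/*
 * __output_skip(): advance the output handle over @len bytes without
 * writing anything; memcpy_skip() copies nothing and claims full success.
 */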
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
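
/*
 * __output_copy_user(): copy sample payload from user space.  Architectures
 * may supply their own arch_perf_out_copy_user(); the generic fallback
 * disables page faults around __copy_from_user_inatomic() so it is safe in
 * atomic (IRQ/NMI) context, and returns the number of bytes not copied.
 */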
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
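
/*
 * One recursion counter per context: task, softirq, hardirq, NMI.
 * get_recursion_context() claims the slot for the current context and
 * returns its index, or -1 if an event is already being handled in this
 * context and handling another one would recurse.
 */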
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */