kernel/trace/ring_buffer.c
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ftrace_event.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on.  A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
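
/*
 * A minimal, single-threaded sketch of the reader-page swap described
 * above. This is illustrative only: struct demo_page and
 * demo_swap_reader() are hypothetical stand-ins, not kernel API, and
 * the plain stores below are a cmpxchg in the real lockless code.
 */
#if 0
struct demo_page {
	struct demo_page *next;
	struct demo_page *prev;
};

/* Swap @reader into the ring in place of @head; return the old head. */
static struct demo_page *demo_swap_reader(struct demo_page *reader,
					  struct demo_page *head)
{
	reader->next = head->next;
	reader->prev = head->prev;

	/* Unlink the old head and splice the spare reader page in. */
	head->prev->next = reader;
	head->next->prev = reader;

	/* The old head is now private to the reader (and splice). */
	return head;
}
#endif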

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 *  This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
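
/*
 * A minimal sketch of how the two flag bits gate recording
 * (illustrative only; recording_allowed() is a hypothetical helper,
 * not part of this file). DISABLED always wins over ON:
 */
#if 0
static int recording_allowed(unsigned long flags)
{
	return (flags & RB_BUFFERS_ON) && !(flags & RB_BUFFERS_DISABLED);
}
#endif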

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return  event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}
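
/*
 * A worked example of the compressed header encoding (illustrative
 * only; demo_data_length() is a hypothetical userspace mirror of
 * rb_event_data_length() for RINGBUF_TYPE_DATA events). The header is
 * 4 bytes (5-bit type_len + 27-bit time_delta); small payloads store
 * length / RB_ALIGNMENT in type_len, larger ones put the byte length
 * in array[0]:
 */
#if 0
static unsigned demo_data_length(unsigned type_len, unsigned array0)
{
	unsigned payload = type_len ? type_len * 4U	/* RB_ALIGNMENT */
				    : array0;

	return payload + 4U;	/* RB_EVNT_HDR_SIZE */
}
/* demo_data_length(4, 0) == 20; demo_data_length(0, 100) == 104 */
#endif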

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself, with the exception
 * of a TIME_EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are packed into the same word. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
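
/*
 * A worked example of the split counter (illustrative only). The low
 * 20 bits hold the write index; the bits above count nested updaters,
 * so adding RB_WRITE_INTCNT bumps the updater count without touching
 * the index:
 *
 *   write                   = 0x00100123  (1 updater, index 0x123)
 *   write + RB_WRITE_INTCNT = 0x00200123  (2 updaters, index unchanged)
 *   write & RB_WRITE_MASK   =    0x00123  (the real write position)
 */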

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
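
/*
 * A worked example (illustrative only): with a nanosecond clock, 2^27
 * ns is roughly 134 ms, so any delta that large fails this test and
 * forces a TIME_EXTEND event to carry the extra bits:
 *
 *   delta = (1ULL << 27) - 1  ->  delta & TS_DELTA_TEST == 0  (fits)
 *   delta = (1ULL << 27)      ->  delta & TS_DELTA_TEST != 0  (extend)
 */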

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	bool				waiters_pending;
};

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	unsigned int			nr_pages;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	int				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worst
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. Spurious wake ups are OK.
	 */
	work->waiters_pending = true;

	if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
		schedule();

	finish_wait(&work->waiters, &wait);
	return 0;
}
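
/*
 * A minimal usage sketch (illustrative only; demo_reader() is
 * hypothetical, and consuming the data is assumed to go through the
 * public read-side API, e.g. ring_buffer_consume()):
 */
#if 0
static void demo_reader(struct ring_buffer *buffer, int cpu)
{
	while (ring_buffer_empty_cpu(buffer, cpu)) {
		if (ring_buffer_wait(buffer, cpu) < 0)
			return;	/* @cpu is not in the buffer's cpumask */
	}
	/* data is now available on @cpu's buffer */
}
#endif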

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	work->waiters_pending = true;
	poll_wait(filp, &work->waiters, poll_table);

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return POLLIN | POLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and thus only need to worry about interrupts, reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we cannot trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 *  the reader page with a page in the buffer, but before it
 *  could make the new header point back to the new page added
 *  it was preempted by a writer. The writer moved forward onto
 *  the new page added by the reader and is about to move forward
 *  again.
 *
 *  You can see, it is legitimate for the previous pointer of
 *  the head (or any page) not to point back to itself. But only
 *  temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
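
/*
 * A minimal sketch of the pointer tagging used below (illustrative
 * only; demo_tag()/demo_untag() are hypothetical). Because list_head
 * pointers are at least 4-byte aligned, the two low bits are free to
 * carry the HEAD/UPDATE flags:
 */
#if 0
static void *demo_tag(void *ptr, unsigned long flag)
{
	return (void *)((unsigned long)ptr | flag);	/* e.g. RB_PAGE_HEAD */
}

static void *demo_untag(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RB_FLAG_MASK);
}
#endif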

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the head page behind
	 * where we started, and we miss it in one loop.
	 * A second loop should grab the head page, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit:
		 * it can only increment when a commit takes place. But that
		 * only happens in the outermost nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	int i;
	struct buffer_page *bpage, *tmp;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * The __GFP_NORETRY flag makes sure that the allocation
		 * fails gracefully without invoking the OOM killer, so
		 * the system is not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL | __GFP_NORETRY,
				    cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu, nr_pages;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/*
	 * If CPU hotplug is not available and the ring buffer is
	 * allocated in an early initcall, it will not be notified of
	 * secondary cpus. In that case, we need to allocate for all
	 * possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
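
/*
 * A minimal usage sketch (illustrative only; demo_use_ring_buffer()
 * is hypothetical). ring_buffer_alloc() is the header macro that
 * wraps __ring_buffer_alloc() with a lock class key, and
 * ring_buffer_write() is assumed from the public API:
 */
#if 0
static int demo_use_ring_buffer(void)
{
	struct ring_buffer *buffer;
	char msg[] = "hello";

	buffer = ring_buffer_alloc(4 * PAGE_SIZE, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	ring_buffer_write(buffer, sizeof(msg), msg);
	ring_buffer_free(buffer);
	return 0;
}
#endif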
1350
1351 /**
1352  * ring_buffer_free - free a ring buffer.
1353  * @buffer: the buffer to free.
1354  */
1355 void
1356 ring_buffer_free(struct ring_buffer *buffer)
1357 {
1358         int cpu;
1359
1360         get_online_cpus();
1361
1362 #ifdef CONFIG_HOTPLUG_CPU
1363         unregister_cpu_notifier(&buffer->cpu_notify);
1364 #endif
1365
1366         for_each_buffer_cpu(buffer, cpu)
1367                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1368
1369         put_online_cpus();
1370
1371         kfree(buffer->buffers);
1372         free_cpumask_var(buffer->cpumask);
1373
1374         kfree(buffer);
1375 }
1376 EXPORT_SYMBOL_GPL(ring_buffer_free);
1377
1378 void ring_buffer_set_clock(struct ring_buffer *buffer,
1379                            u64 (*clock)(void))
1380 {
1381         buffer->clock = clock;
1382 }
1383
1384 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1385
1386 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1387 {
1388         return local_read(&bpage->entries) & RB_WRITE_MASK;
1389 }
1390
1391 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1392 {
1393         return local_read(&bpage->write) & RB_WRITE_MASK;
1394 }
1395
1396 static int
1397 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1398 {
1399         struct list_head *tail_page, *to_remove, *next_page;
1400         struct buffer_page *to_remove_page, *tmp_iter_page;
1401         struct buffer_page *last_page, *first_page;
1402         unsigned int nr_removed;
1403         unsigned long head_bit;
1404         int page_entries;
1405
1406         head_bit = 0;
1407
1408         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1409         atomic_inc(&cpu_buffer->record_disabled);
1410         /*
1411          * We don't race with the readers since we have acquired the reader
1412          * lock. We also don't race with writers after disabling recording.
1413          * This makes it easy to figure out the first and the last page to be
1414          * removed from the list. We unlink all the pages in between including
1415          * the first and last pages. This is done in a busy loop so that we
1416          * lose the least number of traces.
1417          * The pages are freed after we restart recording and unlock readers.
1418          */
1419         tail_page = &cpu_buffer->tail_page->list;
1420
1421         /*
1422          * tail page might be on reader page, we remove the next page
1423          * from the ring buffer
1424          */
1425         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1426                 tail_page = rb_list_head(tail_page->next);
1427         to_remove = tail_page;
1428
1429         /* start of pages to remove */
1430         first_page = list_entry(rb_list_head(to_remove->next),
1431                                 struct buffer_page, list);
1432
1433         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1434                 to_remove = rb_list_head(to_remove)->next;
1435                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1436         }
1437
1438         next_page = rb_list_head(to_remove)->next;
1439
1440         /*
1441          * Now we remove all pages between tail_page and next_page.
1442          * Make sure that we have head_bit value preserved for the
1443          * next page
1444          */
1445         tail_page->next = (struct list_head *)((unsigned long)next_page |
1446                                                 head_bit);
1447         next_page = rb_list_head(next_page);
1448         next_page->prev = tail_page;
1449
1450         /* make sure pages points to a valid page in the ring buffer */
1451         cpu_buffer->pages = next_page;
1452
1453         /* update head page */
1454         if (head_bit)
1455                 cpu_buffer->head_page = list_entry(next_page,
1456                                                 struct buffer_page, list);
1457
1458         /*
1459          * change read pointer to make sure any read iterators reset
1460          * themselves
1461          */
1462         cpu_buffer->read = 0;
1463
1464         /* pages are removed, resume tracing and then free the pages */
1465         atomic_dec(&cpu_buffer->record_disabled);
1466         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1467
1468         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1469
1470         /* last buffer page to remove */
1471         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1472                                 list);
1473         tmp_iter_page = first_page;
1474
1475         do {
1476                 to_remove_page = tmp_iter_page;
1477                 rb_inc_page(cpu_buffer, &tmp_iter_page);
1478
1479                 /* update the counters */
1480                 page_entries = rb_page_entries(to_remove_page);
1481                 if (page_entries) {
1482                         /*
1483                          * If something was added to this page, it was full
1484                          * since it is not the tail page. So we deduct the
1485                          * bytes consumed in ring buffer from here.
1486                          * Increment overrun to account for the lost events.
1487                          */
1488                         local_add(page_entries, &cpu_buffer->overrun);
1489                         local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1490                 }
1491
1492                 /*
1493                  * We have already removed references to this list item, just
1494                  * free up the buffer_page and its page
1495                  */
1496                 free_buffer_page(to_remove_page);
1497                 nr_removed--;
1498
1499         } while (to_remove_page != last_page);
1500
1501         RB_WARN_ON(cpu_buffer, nr_removed);
1502
1503         return nr_removed == 0;
1504 }
1505
1506 static int
1507 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1508 {
1509         struct list_head *pages = &cpu_buffer->new_pages;
1510         int retries, success;
1511
1512         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1513         /*
1514          * We are holding the reader lock, so the reader page won't be swapped
1515          * in the ring buffer. Now we are racing with the writer trying to
1516          * move head page and the tail page.
1517          * We are going to adapt the reader page update process, where:
1518          * 1. We first splice the start and end of the list of new pages
1519          *    between the head page and its previous page.
1520          * 2. We cmpxchg prev_page->next to point from the head page to
1521          *    the start of the new pages list.
1522          * 3. Finally, we update head->prev to the end of the new list.
1523          *
1524          * We will try this process 10 times, to make sure that we don't keep
1525          * spinning.
1526          */
1527         retries = 10;
1528         success = 0;
1529         while (retries--) {
                struct list_head *head_page, *prev_page, *r;
                struct list_head *last_page, *first_page;
                struct list_head *head_page_with_bit;
                struct buffer_page *hpage;

                /* rb_set_head_page() may return NULL */
                hpage = rb_set_head_page(cpu_buffer);
                if (!hpage)
                        break;
                head_page = &hpage->list;
                prev_page = head_page->prev;
1538
1539                 first_page = pages->next;
1540                 last_page  = pages->prev;
1541
1542                 head_page_with_bit = (struct list_head *)
1543                                      ((unsigned long)head_page | RB_PAGE_HEAD);
1544
1545                 last_page->next = head_page_with_bit;
1546                 first_page->prev = prev_page;
1547
1548                 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1549
1550                 if (r == head_page_with_bit) {
1551                         /*
1552                          * Yay, we replaced the page pointer with our new
1553                          * list. Now we just have to update the head page's
1554                          * prev pointer to point to the end of the list.
1555                          */
1556                         head_page->prev = last_page;
1557                         success = 1;
1558                         break;
1559                 }
1560         }
1561
1562         if (success)
1563                 INIT_LIST_HEAD(pages);
1564         /*
1565          * If we weren't successful in adding the new pages, warn and
1566          * stop tracing.
1567          */
1568         RB_WARN_ON(cpu_buffer, !success);
1569         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1570
1571         /* free pages if they weren't inserted */
1572         if (!success) {
1573                 struct buffer_page *bpage, *tmp;
1574                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1575                                          list) {
1576                         list_del_init(&bpage->list);
1577                         free_buffer_page(bpage);
1578                 }
1579         }
1580         return success;
1581 }
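
/*
 * Purely illustrative sketch: rb_example_splice() is not used by the
 * ring buffer itself. It restates the three-step publish performed in
 * rb_insert_pages() above against a plain list, ignoring the
 * RB_PAGE_HEAD flag bit for clarity. The single cmpxchg() on
 * prev->next is what makes the new pages visible to other walkers.
 */
static inline int
rb_example_splice(struct list_head *prev, struct list_head *head,
                  struct list_head *first, struct list_head *last)
{
        /* 1. aim the new sublist at its future neighbors */
        last->next = head;
        first->prev = prev;

        /* 2. atomically swing prev->next from head to the sublist */
        if (cmpxchg(&prev->next, head, first) != head)
                return 0;

        /* 3. only then fix the back pointer; forward walks stay safe */
        head->prev = last;
        return 1;
}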
1582
1583 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1584 {
1585         int success;
1586
1587         if (cpu_buffer->nr_pages_to_update > 0)
1588                 success = rb_insert_pages(cpu_buffer);
1589         else
1590                 success = rb_remove_pages(cpu_buffer,
1591                                         -cpu_buffer->nr_pages_to_update);
1592
1593         if (success)
1594                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1595 }
1596
1597 static void update_pages_handler(struct work_struct *work)
1598 {
1599         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1600                         struct ring_buffer_per_cpu, update_pages_work);
1601         rb_update_pages(cpu_buffer);
1602         complete(&cpu_buffer->update_done);
1603 }
1604
1605 /**
1606  * ring_buffer_resize - resize the ring buffer
1607  * @buffer: the buffer to resize.
1608  * @size: the new size.
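 * @cpu_id: the cpu whose buffer is to be resized, or
 *          RING_BUFFER_ALL_CPUS to resize every per cpu buffer.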
1609  *
1610  * Minimum size is 2 * BUF_PAGE_SIZE.
1611  *
1612  * Returns the (page aligned) buffer size on success and < 0 on failure.
1613  */
1614 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1615                         int cpu_id)
1616 {
1617         struct ring_buffer_per_cpu *cpu_buffer;
1618         unsigned nr_pages;
1619         int cpu, err = 0;
1620
1621         /*
1622          * Always succeed at resizing a non-existent buffer:
1623          */
1624         if (!buffer)
1625                 return size;
1626
1627         /* Make sure the requested buffer exists */
1628         if (cpu_id != RING_BUFFER_ALL_CPUS &&
1629             !cpumask_test_cpu(cpu_id, buffer->cpumask))
1630                 return size;
1631
1632         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1633         size *= BUF_PAGE_SIZE;
1634
1635         /* we need a minimum of two pages */
1636         if (size < BUF_PAGE_SIZE * 2)
1637                 size = BUF_PAGE_SIZE * 2;
1638
1639         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1640
1641         /*
1642          * Don't succeed if resizing is disabled, as a reader might be
1643          * manipulating the ring buffer and is expecting a sane state while
1644          * this is true.
1645          */
1646         if (atomic_read(&buffer->resize_disabled))
1647                 return -EBUSY;
1648
1649         /* prevent another thread from changing buffer sizes */
1650         mutex_lock(&buffer->mutex);
1651
1652         if (cpu_id == RING_BUFFER_ALL_CPUS) {
1653                 /* calculate the pages to update */
1654                 for_each_buffer_cpu(buffer, cpu) {
1655                         cpu_buffer = buffer->buffers[cpu];
1656
1657                         cpu_buffer->nr_pages_to_update = nr_pages -
1658                                                         cpu_buffer->nr_pages;
1659                         /*
1660                          * nothing more to do for removing pages or no update
1661                          */
1662                         if (cpu_buffer->nr_pages_to_update <= 0)
1663                                 continue;
1664                         /*
1665                          * to add pages, make sure all new pages can be
1666                          * allocated without receiving ENOMEM
1667                          */
1668                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1669                         if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1670                                                 &cpu_buffer->new_pages, cpu)) {
1671                                 /* not enough memory for new pages */
1672                                 err = -ENOMEM;
1673                                 goto out_err;
1674                         }
1675                 }
1676
1677                 get_online_cpus();
1678                 /*
1679                  * Fire off all the required work handlers
1680                  * We can't schedule on offline CPUs, but it's not necessary
1681                  * since we can change their buffer sizes without any race.
1682                  */
1683                 for_each_buffer_cpu(buffer, cpu) {
1684                         cpu_buffer = buffer->buffers[cpu];
1685                         if (!cpu_buffer->nr_pages_to_update)
1686                                 continue;
1687
1688                         /* The update must run on the CPU that is being updated. */
1689                         preempt_disable();
1690                         if (cpu == smp_processor_id() || !cpu_online(cpu)) {
1691                                 rb_update_pages(cpu_buffer);
1692                                 cpu_buffer->nr_pages_to_update = 0;
1693                         } else {
1694                                 /*
1695                                  * Can not disable preemption for schedule_work_on()
1696                                  * on PREEMPT_RT.
1697                                  */
1698                                 preempt_enable();
1699                                 schedule_work_on(cpu,
1700                                                 &cpu_buffer->update_pages_work);
1701                                 preempt_disable();
1702                         }
1703                         preempt_enable();
1704                 }
1705
1706                 /* wait for all the updates to complete */
1707                 for_each_buffer_cpu(buffer, cpu) {
1708                         cpu_buffer = buffer->buffers[cpu];
1709                         if (!cpu_buffer->nr_pages_to_update)
1710                                 continue;
1711
1712                         if (cpu_online(cpu))
1713                                 wait_for_completion(&cpu_buffer->update_done);
1714                         cpu_buffer->nr_pages_to_update = 0;
1715                 }
1716
1717                 put_online_cpus();
1718         } else {
1719                 /* Make sure this CPU has been initialized */
1720                 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1721                         goto out;
1722
1723                 cpu_buffer = buffer->buffers[cpu_id];
1724
1725                 if (nr_pages == cpu_buffer->nr_pages)
1726                         goto out;
1727
1728                 cpu_buffer->nr_pages_to_update = nr_pages -
1729                                                 cpu_buffer->nr_pages;
1730
1731                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1732                 if (cpu_buffer->nr_pages_to_update > 0 &&
1733                         __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1734                                             &cpu_buffer->new_pages, cpu_id)) {
1735                         err = -ENOMEM;
1736                         goto out_err;
1737                 }
1738
1739                 get_online_cpus();
1740
1741                 preempt_disable();
1742                 /* The update must run on the CPU that is being updated. */
1743                 if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
1744                         rb_update_pages(cpu_buffer);
1745                 else {
1746                         /*
1747                          * Can not disable preemption for schedule_work_on()
1748                          * on PREEMPT_RT.
1749                          */
1750                         preempt_enable();
1751                         schedule_work_on(cpu_id,
1752                                          &cpu_buffer->update_pages_work);
1753                         wait_for_completion(&cpu_buffer->update_done);
1754                         preempt_disable();
1755                 }
1756                 preempt_enable();
1757
1758                 cpu_buffer->nr_pages_to_update = 0;
1759                 put_online_cpus();
1760         }
1761
1762  out:
1763         /*
1764          * The ring buffer resize can happen with the ring buffer
1765          * enabled, so that the update disturbs the tracing as little
1766          * as possible. But if the buffer is disabled, we do not need
1767          * to worry about that, and we can take the time to verify
1768          * that the buffer is not corrupt.
1769          */
1770         if (atomic_read(&buffer->record_disabled)) {
1771                 atomic_inc(&buffer->record_disabled);
1772                 /*
1773                  * Even though the buffer was disabled, we must make sure
1774                  * that it is truly disabled before calling rb_check_pages.
1775                  * There could have been a race between checking
1776                  * record_disable and incrementing it.
1777                  */
1778                 synchronize_sched();
1779                 for_each_buffer_cpu(buffer, cpu) {
1780                         cpu_buffer = buffer->buffers[cpu];
1781                         rb_check_pages(cpu_buffer);
1782                 }
1783                 atomic_dec(&buffer->record_disabled);
1784         }
1785
1786         mutex_unlock(&buffer->mutex);
1787         return size;
1788
1789  out_err:
1790         for_each_buffer_cpu(buffer, cpu) {
1791                 struct buffer_page *bpage, *tmp;
1792
1793                 cpu_buffer = buffer->buffers[cpu];
1794                 cpu_buffer->nr_pages_to_update = 0;
1795
1796                 if (list_empty(&cpu_buffer->new_pages))
1797                         continue;
1798
1799                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1800                                         list) {
1801                         list_del_init(&bpage->list);
1802                         free_buffer_page(bpage);
1803                 }
1804         }
1805         mutex_unlock(&buffer->mutex);
1806         return err;
1807 }
1808 EXPORT_SYMBOL_GPL(ring_buffer_resize);
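
/*
 * Minimal sketch of the size-to-pages rounding that
 * ring_buffer_resize() performs; rb_example_size_to_pages() is for
 * illustration only and is not called anywhere. Any byte count is
 * rounded up to whole pages, with a floor of two pages.
 */
static inline unsigned long
rb_example_size_to_pages(unsigned long size)
{
        unsigned long nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

        return nr_pages < 2 ? 2 : nr_pages;
}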
1809
1810 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1811 {
1812         mutex_lock(&buffer->mutex);
1813         if (val)
1814                 buffer->flags |= RB_FL_OVERWRITE;
1815         else
1816                 buffer->flags &= ~RB_FL_OVERWRITE;
1817         mutex_unlock(&buffer->mutex);
1818 }
1819 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1820
1821 static inline void *
1822 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1823 {
1824         return bpage->data + index;
1825 }
1826
1827 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1828 {
1829         return bpage->page->data + index;
1830 }
1831
1832 static inline struct ring_buffer_event *
1833 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1834 {
1835         return __rb_page_index(cpu_buffer->reader_page,
1836                                cpu_buffer->reader_page->read);
1837 }
1838
1839 static inline struct ring_buffer_event *
1840 rb_iter_head_event(struct ring_buffer_iter *iter)
1841 {
1842         return __rb_page_index(iter->head_page, iter->head);
1843 }
1844
1845 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1846 {
1847         return local_read(&bpage->page->commit);
1848 }
1849
1850 /* Size is determined by what has been committed */
1851 static inline unsigned rb_page_size(struct buffer_page *bpage)
1852 {
1853         return rb_page_commit(bpage);
1854 }
1855
1856 static inline unsigned
1857 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1858 {
1859         return rb_page_commit(cpu_buffer->commit_page);
1860 }
1861
1862 static inline unsigned
1863 rb_event_index(struct ring_buffer_event *event)
1864 {
1865         unsigned long addr = (unsigned long)event;
1866
1867         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1868 }
1869
1870 static inline int
1871 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1872                    struct ring_buffer_event *event)
1873 {
1874         unsigned long addr = (unsigned long)event;
1875         unsigned long index;
1876
1877         index = rb_event_index(event);
1878         addr &= PAGE_MASK;
1879
1880         return cpu_buffer->commit_page->page == (void *)addr &&
1881                 rb_commit_index(cpu_buffer) == index;
1882 }
1883
1884 static void
1885 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1886 {
1887         unsigned long max_count;
1888
1889         /*
1890          * We only race with interrupts and NMIs on this CPU.
1891          * If we own the commit event, then we can commit
1892          * all others that interrupted us, since the interruptions
1893          * are in stack format (they finish before they come
1894          * back to us). This allows us to do a simple loop to
1895          * assign the commit to the tail.
1896          */
1897  again:
1898         max_count = cpu_buffer->nr_pages * 100;
1899
1900         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1901                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1902                         return;
1903                 if (RB_WARN_ON(cpu_buffer,
1904                                rb_is_reader_page(cpu_buffer->tail_page)))
1905                         return;
1906                 local_set(&cpu_buffer->commit_page->page->commit,
1907                           rb_page_write(cpu_buffer->commit_page));
1908                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1909                 cpu_buffer->write_stamp =
1910                         cpu_buffer->commit_page->page->time_stamp;
1911                 /* add barrier to keep gcc from optimizing too much */
1912                 barrier();
1913         }
1914         while (rb_commit_index(cpu_buffer) !=
1915                rb_page_write(cpu_buffer->commit_page)) {
1916
1917                 local_set(&cpu_buffer->commit_page->page->commit,
1918                           rb_page_write(cpu_buffer->commit_page));
1919                 RB_WARN_ON(cpu_buffer,
1920                            local_read(&cpu_buffer->commit_page->page->commit) &
1921                            ~RB_WRITE_MASK);
1922                 barrier();
1923         }
1924
1925         /* again, keep gcc from optimizing */
1926         barrier();
1927
1928         /*
1929          * If an interrupt came in just after the first while loop
1930          * and pushed the tail page forward, we will be left with
1931          * a dangling commit that will never go forward.
1932          */
1933         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1934                 goto again;
1935 }
1936
1937 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1938 {
1939         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1940         cpu_buffer->reader_page->read = 0;
1941 }
1942
1943 static void rb_inc_iter(struct ring_buffer_iter *iter)
1944 {
1945         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1946
1947         /*
1948          * The iterator could be on the reader page (it starts there).
1949          * But the head could have moved, since the reader was
1950          * found. Check for this case and assign the iterator
1951          * to the head page instead of next.
1952          */
1953         if (iter->head_page == cpu_buffer->reader_page)
1954                 iter->head_page = rb_set_head_page(cpu_buffer);
1955         else
1956                 rb_inc_page(cpu_buffer, &iter->head_page);
1957
1958         iter->read_stamp = iter->head_page->page->time_stamp;
1959         iter->head = 0;
1960 }
1961
1962 /* Slow path, do not inline */
1963 static noinline struct ring_buffer_event *
1964 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1965 {
1966         event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1967
1968         /* Not the first event on the page? */
1969         if (rb_event_index(event)) {
1970                 event->time_delta = delta & TS_MASK;
1971                 event->array[0] = delta >> TS_SHIFT;
1972         } else {
1973                 /* nope, just zero it */
1974                 event->time_delta = 0;
1975                 event->array[0] = 0;
1976         }
1977
1978         return skip_time_extend(event);
1979 }
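
/*
 * Sketch, for reference only: recovering the full delta that
 * rb_add_time_stamp() just split across the event. The low TS_SHIFT
 * bits sit in time_delta and the upper bits in array[0];
 * rb_update_write_stamp() below does the same reassembly when it sees
 * a RINGBUF_TYPE_TIME_EXTEND event.
 */
static inline u64
rb_example_extend_delta(struct ring_buffer_event *event)
{
        return ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
}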
1980
1981 /**
1982  * rb_update_event - update event type and data
1983  * @cpu_buffer: the per cpu buffer the event is written to
1984  * @event: the event to update
1985  * @length: the size of the event field in the ring buffer
1986  *
1987  * Update the type and data fields of the event. The length
1988  * is the actual size that is written to the ring buffer,
1989  * and with this, we can determine what to place into the
1990  * data field.
1991  */
1992 static void
1993 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1994                 struct ring_buffer_event *event, unsigned length,
1995                 int add_timestamp, u64 delta)
1996 {
1997         /* Only a commit updates the timestamp */
1998         if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1999                 delta = 0;
2000
2001         /*
2002          * If we need to add a timestamp, then we
2003          * add it to the start of the reserved space.
2004          */
2005         if (unlikely(add_timestamp)) {
2006                 event = rb_add_time_stamp(event, delta);
2007                 length -= RB_LEN_TIME_EXTEND;
2008                 delta = 0;
2009         }
2010
2011         event->time_delta = delta;
2012         length -= RB_EVNT_HDR_SIZE;
2013         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2014                 event->type_len = 0;
2015                 event->array[0] = length;
2016         } else
2017                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2018 }
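
/*
 * Example of the encoding above, assuming a build without
 * RB_FORCE_8BYTE_ALIGNMENT: a 12 byte payload is stored with
 * type_len == 3 (three RB_ALIGNMENT sized chunks), while a payload
 * larger than RB_MAX_SMALL_DATA keeps type_len == 0 and carries its
 * byte length in array[0] instead.
 */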
2019
2020 /*
2021  * rb_handle_head_page - writer hit the head page
2022  *
2023  * Returns: +1 to retry page
2024  *           0 to continue
2025  *          -1 on error
2026  */
2027 static int
2028 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2029                     struct buffer_page *tail_page,
2030                     struct buffer_page *next_page)
2031 {
2032         struct buffer_page *new_head;
2033         int entries;
2034         int type;
2035         int ret;
2036
2037         entries = rb_page_entries(next_page);
2038
2039         /*
2040          * The hard part is here. We need to move the head
2041          * forward, and protect against both readers on
2042          * other CPUs and writers coming in via interrupts.
2043          */
2044         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2045                                        RB_PAGE_HEAD);
2046
2047         /*
2048          * type can be one of four:
2049          *  NORMAL - an interrupt already moved it for us
2050          *  HEAD   - we are the first to get here.
2051          *  UPDATE - we are the interrupt interrupting
2052          *           a current move.
2053          *  MOVED  - a reader on another CPU moved the next
2054          *           pointer to its reader page. Give up
2055          *           and try again.
2056          */
2057
2058         switch (type) {
2059         case RB_PAGE_HEAD:
2060                 /*
2061                  * We changed the head to UPDATE, thus
2062                  * it is our responsibility to update
2063                  * the counters.
2064                  */
2065                 local_add(entries, &cpu_buffer->overrun);
2066                 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2067
2068                 /*
2069                  * The entries will be zeroed out when we move the
2070                  * tail page.
2071                  */
2072
2073                 /* still more to do */
2074                 break;
2075
2076         case RB_PAGE_UPDATE:
2077                 /*
2078                  * This is an interrupt that interrupted the
2079                  * previous update. Still more to do.
2080                  */
2081                 break;
2082         case RB_PAGE_NORMAL:
2083                 /*
2084                  * An interrupt came in before the update
2085                  * and processed this for us.
2086                  * Nothing left to do.
2087                  */
2088                 return 1;
2089         case RB_PAGE_MOVED:
2090                 /*
2091                  * The reader is on another CPU and just did
2092                  * a swap with our next_page.
2093                  * Try again.
2094                  */
2095                 return 1;
2096         default:
2097                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2098                 return -1;
2099         }
2100
2101         /*
2102          * Now that we are here, the old head pointer is
2103          * set to UPDATE. This will keep the reader from
2104          * swapping the head page with the reader page.
2105          * The reader (on another CPU) will spin till
2106          * we are finished.
2107          *
2108          * We just need to protect against interrupts
2109          * doing the job. We will set the next pointer
2110          * to HEAD. After that, we set the old pointer
2111          * to NORMAL, but only if it was HEAD before;
2112          * otherwise we are an interrupt, and only
2113          * want the outermost commit to reset it.
2114          */
2115         new_head = next_page;
2116         rb_inc_page(cpu_buffer, &new_head);
2117
2118         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2119                                     RB_PAGE_NORMAL);
2120
2121         /*
2122          * Valid returns are:
2123          *  HEAD   - an interrupt came in and already set it.
2124          *  NORMAL - One of two things:
2125          *            1) We really set it.
2126          *            2) A bunch of interrupts came in and moved
2127          *               the page forward again.
2128          */
2129         switch (ret) {
2130         case RB_PAGE_HEAD:
2131         case RB_PAGE_NORMAL:
2132                 /* OK */
2133                 break;
2134         default:
2135                 RB_WARN_ON(cpu_buffer, 1);
2136                 return -1;
2137         }
2138
2139         /*
2140          * It is possible that an interrupt came in,
2141          * set the head up, then more interrupts came in
2142          * and moved it again. When we get back here,
2143          * the page would have been set to NORMAL but we
2144          * just set it back to HEAD.
2145          *
2146          * How do you detect this? Well, if that happened
2147          * the tail page would have moved.
2148          */
2149         if (ret == RB_PAGE_NORMAL) {
2150                 /*
2151                  * If the tail had moved past next, then we need
2152                  * to reset the pointer.
2153                  */
2154                 if (cpu_buffer->tail_page != tail_page &&
2155                     cpu_buffer->tail_page != next_page)
2156                         rb_head_page_set_normal(cpu_buffer, new_head,
2157                                                 next_page,
2158                                                 RB_PAGE_HEAD);
2159         }
2160
2161         /*
2162          * If this was the outermost commit (the one that
2163          * changed the original pointer from HEAD to UPDATE),
2164          * then it is up to us to reset it to NORMAL.
2165          */
2166         if (type == RB_PAGE_HEAD) {
2167                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2168                                               tail_page,
2169                                               RB_PAGE_UPDATE);
2170                 if (RB_WARN_ON(cpu_buffer,
2171                                ret != RB_PAGE_UPDATE))
2172                         return -1;
2173         }
2174
2175         return 0;
2176 }
2177
2178 static unsigned rb_calculate_event_length(unsigned length)
2179 {
2180         struct ring_buffer_event event; /* Used only for sizeof array */
2181
2182         /* zero length can cause confusion */
2183         if (!length)
2184                 length = 1;
2185
2186         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2187                 length += sizeof(event.array[0]);
2188
2189         length += RB_EVNT_HDR_SIZE;
2190         length = ALIGN(length, RB_ARCH_ALIGNMENT);
2191
2192         return length;
2193 }
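
/*
 * Worked example, again assuming no forced 8 byte alignment: a
 * requested length of 3 becomes 3 + RB_EVNT_HDR_SIZE = 7 bytes,
 * aligned up to 8. A zero length request is first bumped to 1, since
 * a zero length event would be ambiguous to the reader.
 */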
2194
2195 static inline void
2196 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2197               struct buffer_page *tail_page,
2198               unsigned long tail, unsigned long length)
2199 {
2200         struct ring_buffer_event *event;
2201
2202         /*
2203          * Only the event that crossed the page boundary
2204          * must fill the old tail_page with padding.
2205          */
2206         if (tail >= BUF_PAGE_SIZE) {
2207                 /*
2208                  * If the page was filled, then we still need
2209                  * to update the real_end. Reset it to zero
2210                  * and the reader will ignore it.
2211                  */
2212                 if (tail == BUF_PAGE_SIZE)
2213                         tail_page->real_end = 0;
2214
2215                 local_sub(length, &tail_page->write);
2216                 return;
2217         }
2218
2219         event = __rb_page_index(tail_page, tail);
2220         kmemcheck_annotate_bitfield(event, bitfield);
2221
2222         /* account for padding bytes */
2223         local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2224
2225         /*
2226          * Save the original length to the meta data.
2227          * This will be used by the reader to add to the
2228          * lost event counter.
2229          */
2230         tail_page->real_end = tail;
2231
2232         /*
2233          * If this event is bigger than the minimum size, then
2234          * we need to be careful that we don't subtract the
2235          * write counter enough to allow another writer to slip
2236          * in on this page.
2237          * We put in a discarded commit instead, to make sure
2238          * that this space is not used again.
2239          *
2240          * If we are less than the minimum size, we don't need to
2241          * worry about it.
2242          */
2243         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2244                 /* No room for any events */
2245
2246                 /* Mark the rest of the page with padding */
2247                 rb_event_set_padding(event);
2248
2249                 /* Set the write back to the previous setting */
2250                 local_sub(length, &tail_page->write);
2251                 return;
2252         }
2253
2254         /* Put in a discarded event */
2255         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2256         event->type_len = RINGBUF_TYPE_PADDING;
2257         /* time delta must be non zero */
2258         event->time_delta = 1;
2259
2260         /* Set write to end of buffer */
2261         length = (tail + length) - BUF_PAGE_SIZE;
2262         local_sub(length, &tail_page->write);
2263 }
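
/*
 * Example, assuming 4K pages (so BUF_PAGE_SIZE == 4080 once the page
 * header is subtracted): an event that ends at tail == 4000 leaves 80
 * bytes, and the discarded PADDING event above gets array[0] == 76,
 * i.e. the remainder minus its own RB_EVNT_HDR_SIZE header.
 */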
2264
2265 /*
2266  * This is the slow path, force gcc not to inline it.
2267  */
2268 static noinline struct ring_buffer_event *
2269 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2270              unsigned long length, unsigned long tail,
2271              struct buffer_page *tail_page, u64 ts)
2272 {
2273         struct buffer_page *commit_page = cpu_buffer->commit_page;
2274         struct ring_buffer *buffer = cpu_buffer->buffer;
2275         struct buffer_page *next_page;
2276         int ret;
2277
2278         next_page = tail_page;
2279
2280         rb_inc_page(cpu_buffer, &next_page);
2281
2282         /*
2283          * If for some reason, we had an interrupt storm that made
2284          * it all the way around the buffer, bail, and warn
2285          * about it.
2286          */
2287         if (unlikely(next_page == commit_page)) {
2288                 local_inc(&cpu_buffer->commit_overrun);
2289                 goto out_reset;
2290         }
2291
2292         /*
2293          * This is where the fun begins!
2294          *
2295          * We are fighting against races between a reader that
2296          * could be on another CPU trying to swap its reader
2297          * page with the buffer head.
2298          *
2299          * We are also fighting against interrupts coming in and
2300          * moving the head or tail on us as well.
2301          *
2302          * If the next page is the head page then we have filled
2303          * the buffer, unless the commit page is still on the
2304          * reader page.
2305          */
2306         if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2307
2308                 /*
2309                  * If the commit is not on the reader page, then
2310                  * move the head page.
2311                  */
2312                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2313                         /*
2314                          * If we are not in overwrite mode,
2315                          * this is easy, just stop here.
2316                          */
2317                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
2318                                 local_inc(&cpu_buffer->dropped_events);
2319                                 goto out_reset;
2320                         }
2321
2322                         ret = rb_handle_head_page(cpu_buffer,
2323                                                   tail_page,
2324                                                   next_page);
2325                         if (ret < 0)
2326                                 goto out_reset;
2327                         if (ret)
2328                                 goto out_again;
2329                 } else {
2330                         /*
2331                          * We need to be careful here too. The
2332                          * commit page could still be on the reader
2333                          * page. We could have a small buffer, and
2334                          * have filled up the buffer with events
2335                          * from interrupts and such, and wrapped.
2336                          *
2337                          * Note, if the tail page is also on the
2338                          * reader_page, we let it move out.
2339                          */
2340                         if (unlikely((cpu_buffer->commit_page !=
2341                                       cpu_buffer->tail_page) &&
2342                                      (cpu_buffer->commit_page ==
2343                                       cpu_buffer->reader_page))) {
2344                                 local_inc(&cpu_buffer->commit_overrun);
2345                                 goto out_reset;
2346                         }
2347                 }
2348         }
2349
2350         ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2351         if (ret) {
2352                 /*
2353                  * Nested commits always have zero deltas, so
2354                  * just reread the time stamp
2355                  */
2356                 ts = rb_time_stamp(buffer);
2357                 next_page->page->time_stamp = ts;
2358         }
2359
2360  out_again:
2361
2362         rb_reset_tail(cpu_buffer, tail_page, tail, length);
2363
2364         /* fail and let the caller try again */
2365         return ERR_PTR(-EAGAIN);
2366
2367  out_reset:
2368         /* reset write */
2369         rb_reset_tail(cpu_buffer, tail_page, tail, length);
2370
2371         return NULL;
2372 }
2373
2374 static struct ring_buffer_event *
2375 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2376                   unsigned long length, u64 ts,
2377                   u64 delta, int add_timestamp)
2378 {
2379         struct buffer_page *tail_page;
2380         struct ring_buffer_event *event;
2381         unsigned long tail, write;
2382
2383         /*
2384          * If the time delta since the last event is too big to
2385          * hold in the time field of the event, then we append a
2386          * TIME EXTEND event ahead of the data event.
2387          */
2388         if (unlikely(add_timestamp))
2389                 length += RB_LEN_TIME_EXTEND;
2390
2391         tail_page = cpu_buffer->tail_page;
2392         write = local_add_return(length, &tail_page->write);
2393
2394         /* set write to only the index of the write */
2395         write &= RB_WRITE_MASK;
2396         tail = write - length;
2397
2398         /*
2399          * If this is the first commit on the page, then it has the same
2400          * timestamp as the page itself.
2401          */
2402         if (!tail)
2403                 delta = 0;
2404
2405         /* See if we shot past the end of this buffer page */
2406         if (unlikely(write > BUF_PAGE_SIZE))
2407                 return rb_move_tail(cpu_buffer, length, tail,
2408                                     tail_page, ts);
2409
2410         /* We reserved something on the buffer */
2411
2412         event = __rb_page_index(tail_page, tail);
2413         kmemcheck_annotate_bitfield(event, bitfield);
2414         rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2415
2416         local_inc(&tail_page->entries);
2417
2418         /*
2419          * If this is the first commit on the page, then update
2420          * its timestamp.
2421          */
2422         if (!tail)
2423                 tail_page->page->time_stamp = ts;
2424
2425         /* account for these added bytes */
2426         local_add(length, &cpu_buffer->entries_bytes);
2427
2428         return event;
2429 }
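
/*
 * Example of the index math above, with the same 4080 byte pages: a
 * 32 byte reservation that returns write == 4100 gives tail == 4068,
 * so the event straddles the end of the page and the slow path
 * rb_move_tail() redoes the reservation on the next page.
 */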
2430
2431 static inline int
2432 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2433                   struct ring_buffer_event *event)
2434 {
2435         unsigned long new_index, old_index;
2436         struct buffer_page *bpage;
2437         unsigned long index;
2438         unsigned long addr;
2439
2440         new_index = rb_event_index(event);
2441         old_index = new_index + rb_event_ts_length(event);
2442         addr = (unsigned long)event;
2443         addr &= PAGE_MASK;
2444
2445         bpage = cpu_buffer->tail_page;
2446
2447         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2448                 unsigned long write_mask =
2449                         local_read(&bpage->write) & ~RB_WRITE_MASK;
2450                 unsigned long event_length = rb_event_length(event);
2451                 /*
2452                  * This is on the tail page. It is possible that
2453                  * a write could come in and move the tail page
2454                  * and write to the next page. That is fine
2455                  * because we just shorten what is on this page.
2456                  */
2457                 old_index += write_mask;
2458                 new_index += write_mask;
2459                 index = local_cmpxchg(&bpage->write, old_index, new_index);
2460                 if (index == old_index) {
2461                         /* update counters */
2462                         local_sub(event_length, &cpu_buffer->entries_bytes);
2463                         return 1;
2464                 }
2465         }
2466
2467         /* could not discard */
2468         return 0;
2469 }
2470
2471 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2472 {
2473         local_inc(&cpu_buffer->committing);
2474         local_inc(&cpu_buffer->commits);
2475 }
2476
2477 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2478 {
2479         unsigned long commits;
2480
2481         if (RB_WARN_ON(cpu_buffer,
2482                        !local_read(&cpu_buffer->committing)))
2483                 return;
2484
2485  again:
2486         commits = local_read(&cpu_buffer->commits);
2487         /* synchronize with interrupts */
2488         barrier();
2489         if (local_read(&cpu_buffer->committing) == 1)
2490                 rb_set_commit_to_write(cpu_buffer);
2491
2492         local_dec(&cpu_buffer->committing);
2493
2494         /* synchronize with interrupts */
2495         barrier();
2496
2497         /*
2498          * Need to account for interrupts coming in between the
2499          * updating of the commit page and the clearing of the
2500          * committing counter.
2501          */
2502         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2503             !local_read(&cpu_buffer->committing)) {
2504                 local_inc(&cpu_buffer->committing);
2505                 goto again;
2506         }
2507 }
2508
2509 static struct ring_buffer_event *
2510 rb_reserve_next_event(struct ring_buffer *buffer,
2511                       struct ring_buffer_per_cpu *cpu_buffer,
2512                       unsigned long length)
2513 {
2514         struct ring_buffer_event *event;
2515         u64 ts, delta;
2516         int nr_loops = 0;
2517         int add_timestamp;
2518         u64 diff;
2519
2520         rb_start_commit(cpu_buffer);
2521
2522 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2523         /*
2524          * Due to the ability to swap a cpu buffer between two ring
2525          * buffers, it is possible it was swapped before we committed
2526          * (committing stops a swap). We check for it here and
2527          * if it happened, we have to fail the write.
2528          */
2529         barrier();
2530         if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2531                 local_dec(&cpu_buffer->committing);
2532                 local_dec(&cpu_buffer->commits);
2533                 return NULL;
2534         }
2535 #endif
2536
2537         length = rb_calculate_event_length(length);
2538  again:
2539         add_timestamp = 0;
2540         delta = 0;
2541
2542         /*
2543          * We allow for interrupts to reenter here and do a trace.
2544          * If one does, it will cause this original code to loop
2545          * back here. Even with heavy interrupts happening, this
2546          * should only happen a few times in a row. If this happens
2547          * 1000 times in a row, there must be either an interrupt
2548          * storm or we have something buggy.
2549          * Bail!
2550          */
2551         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2552                 goto out_fail;
2553
2554         ts = rb_time_stamp(cpu_buffer->buffer);
2555         diff = ts - cpu_buffer->write_stamp;
2556
2557         /* make sure this diff is calculated here */
2558         barrier();
2559
2560         /* Did the write stamp get updated already? */
2561         if (likely(ts >= cpu_buffer->write_stamp)) {
2562                 delta = diff;
2563                 if (unlikely(test_time_stamp(delta))) {
2564                         int local_clock_stable = 1;
2565 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2566                         local_clock_stable = sched_clock_stable;
2567 #endif
2568                         WARN_ONCE(delta > (1ULL << 59),
2569                                   KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2570                                   (unsigned long long)delta,
2571                                   (unsigned long long)ts,
2572                                   (unsigned long long)cpu_buffer->write_stamp,
2573                                   local_clock_stable ? "" :
2574                                   "If you just came from a suspend/resume,\n"
2575                                   "please switch to the trace global clock:\n"
2576                                   "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2577                         add_timestamp = 1;
2578                 }
2579         }
2580
2581         event = __rb_reserve_next(cpu_buffer, length, ts,
2582                                   delta, add_timestamp);
2583         if (unlikely(PTR_ERR(event) == -EAGAIN))
2584                 goto again;
2585
2586         if (!event)
2587                 goto out_fail;
2588
2589         return event;
2590
2591  out_fail:
2592         rb_end_commit(cpu_buffer);
2593         return NULL;
2594 }
2595
2596 #ifdef CONFIG_TRACING
2597
2598 /*
2599  * The lock and unlock are done within a preempt disable section.
2600  * The current_context per_cpu variable can only be modified
2601  * by the current task between lock and unlock. But it can
2602  * be modified more than once via an interrupt. To pass this
2603  * information from the lock to the unlock without having to
2604  * access the 'in_interrupt()' functions again (which do show
2605  * a bit of overhead in something as critical as function tracing),
2606  * we use a bitmask trick.
2607  *
2608  *  bit 0 =  NMI context
2609  *  bit 1 =  IRQ context
2610  *  bit 2 =  SoftIRQ context
2611  *  bit 3 =  normal context.
2612  *
2613  * This works because this is the order of contexts that can
2614  * preempt other contexts. A SoftIRQ never preempts an IRQ
2615  * context.
2616  *
2617  * When the context is determined, the corresponding bit is
2618  * checked and set (if it was set, then a recursion of that context
2619  * happened).
2620  *
2621  * On unlock, we need to clear this bit. To do so, just subtract
2622  * 1 from the current_context and AND it to itself.
2623  *
2624  * (binary)
2625  *  101 - 1 = 100
2626  *  101 & 100 = 100 (clearing bit zero)
2627  *
2628  *  1010 - 1 = 1001
2629  *  1010 & 1001 = 1000 (clearing bit 1)
2630  *
2631  * The least significant bit can be cleared this way, and it
2632  * just so happens that it is the same bit corresponding to
2633  * the current context.
2634  */
2635 static DEFINE_PER_CPU(unsigned int, current_context);
2636
2637 static __always_inline int trace_recursive_lock(void)
2638 {
2639         unsigned int val = this_cpu_read(current_context);
2640         int bit;
2641
2642         if (in_interrupt()) {
2643                 if (in_nmi())
2644                         bit = 0;
2645                 else if (in_irq())
2646                         bit = 1;
2647                 else
2648                         bit = 2;
2649         } else
2650                 bit = 3;
2651
2652         if (unlikely(val & (1 << bit)))
2653                 return 1;
2654
2655         val |= (1 << bit);
2656         this_cpu_write(current_context, val);
2657
2658         return 0;
2659 }
2660
2661 static __always_inline void trace_recursive_unlock(void)
2662 {
2663         unsigned int val = this_cpu_read(current_context);
2664
2665         val--;
2666         val &= this_cpu_read(current_context);
2667         this_cpu_write(current_context, val);
2668 }
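
/*
 * Example: normal context tracing sets bit 3 (1000b). An IRQ that
 * traces on top of it sets bit 1 as well (1010b). Its unlock then
 * computes 1010b & 1001b == 1000b, clearing only the IRQ bit and
 * leaving the outer context's bit in place.
 */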
2669
2670 #else
2671
2672 #define trace_recursive_lock()          (0)
2673 #define trace_recursive_unlock()        do { } while (0)
2674
2675 #endif
2676
2677 /**
2678  * ring_buffer_lock_reserve - reserve a part of the buffer
2679  * @buffer: the ring buffer to reserve from
2680  * @length: the length of the data to reserve (excluding event header)
2681  *
2682  * Returns a reserved event on the ring buffer to copy directly to.
2683  * The user of this interface will need to get the body to write into
2684  * and can use the ring_buffer_event_data() interface.
2685  *
2686  * The length is the length of the data needed, not the event length
2687  * which also includes the event header.
2688  *
2689  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2690  * If NULL is returned, then nothing has been allocated or locked.
2691  */
2692 struct ring_buffer_event *
2693 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2694 {
2695         struct ring_buffer_per_cpu *cpu_buffer;
2696         struct ring_buffer_event *event;
2697         int cpu;
2698
2699         if (ring_buffer_flags != RB_BUFFERS_ON)
2700                 return NULL;
2701
2702         /* If we are tracing schedule, we don't want to recurse */
2703         preempt_disable_notrace();
2704
2705         if (atomic_read(&buffer->record_disabled))
2706                 goto out_nocheck;
2707
2708         if (trace_recursive_lock())
2709                 goto out_nocheck;
2710
2711         cpu = raw_smp_processor_id();
2712
2713         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2714                 goto out;
2715
2716         cpu_buffer = buffer->buffers[cpu];
2717
2718         if (atomic_read(&cpu_buffer->record_disabled))
2719                 goto out;
2720
2721         if (length > BUF_MAX_DATA_SIZE)
2722                 goto out;
2723
2724         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2725         if (!event)
2726                 goto out;
2727
2728         return event;
2729
2730  out:
2731         trace_recursive_unlock();
2732
2733  out_nocheck:
2734         preempt_enable_notrace();
2735         return NULL;
2736 }
2737 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
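
/*
 * Minimal usage sketch of the reserve/commit pair. Only the
 * ring_buffer_* calls are real API; rb_example_write_u64() itself is
 * illustrative and not used anywhere.
 */
static inline int
rb_example_write_u64(struct ring_buffer *buffer, u64 val)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, sizeof(val));
        if (!event)
                return -EBUSY;

        *(u64 *)ring_buffer_event_data(event) = val;
        return ring_buffer_unlock_commit(buffer, event);
}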
2738
2739 static void
2740 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2741                       struct ring_buffer_event *event)
2742 {
2743         u64 delta;
2744
2745         /*
2746          * The first event in the commit queue updates the
2747          * time stamp.
2748          */
2749         if (rb_event_is_commit(cpu_buffer, event)) {
2750                 /*
2751                  * A commit event that is first on a page
2752                  * updates the write timestamp with the page stamp
2753                  */
2754                 if (!rb_event_index(event))
2755                         cpu_buffer->write_stamp =
2756                                 cpu_buffer->commit_page->page->time_stamp;
2757                 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2758                         delta = event->array[0];
2759                         delta <<= TS_SHIFT;
2760                         delta += event->time_delta;
2761                         cpu_buffer->write_stamp += delta;
2762                 } else
2763                         cpu_buffer->write_stamp += event->time_delta;
2764         }
2765 }
2766
2767 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2768                       struct ring_buffer_event *event)
2769 {
2770         local_inc(&cpu_buffer->entries);
2771         rb_update_write_stamp(cpu_buffer, event);
2772         rb_end_commit(cpu_buffer);
2773 }
2774
2775 static __always_inline void
2776 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2777 {
2778         if (buffer->irq_work.waiters_pending) {
2779                 buffer->irq_work.waiters_pending = false;
2780                 /* irq_work_queue() supplies its own memory barriers */
2781                 irq_work_queue(&buffer->irq_work.work);
2782         }
2783
2784         if (cpu_buffer->irq_work.waiters_pending) {
2785                 cpu_buffer->irq_work.waiters_pending = false;
2786                 /* irq_work_queue() supplies its own memory barriers */
2787                 irq_work_queue(&cpu_buffer->irq_work.work);
2788         }
2789 }
2790
2791 /**
2792  * ring_buffer_unlock_commit - commit a reserved event
2793  * @buffer: The buffer to commit to
2794  * @event: The event pointer to commit.
2795  *
2796  * This commits the data to the ring buffer, and releases any locks held.
2797  *
2798  * Must be paired with ring_buffer_lock_reserve.
2799  */
2800 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2801                               struct ring_buffer_event *event)
2802 {
2803         struct ring_buffer_per_cpu *cpu_buffer;
2804         int cpu = raw_smp_processor_id();
2805
2806         cpu_buffer = buffer->buffers[cpu];
2807
2808         rb_commit(cpu_buffer, event);
2809
2810         rb_wakeups(buffer, cpu_buffer);
2811
2812         trace_recursive_unlock();
2813
2814         preempt_enable_notrace();
2815
2816         return 0;
2817 }
2818 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2819
2820 static inline void rb_event_discard(struct ring_buffer_event *event)
2821 {
2822         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2823                 event = skip_time_extend(event);
2824
2825         /* array[0] holds the actual length for the discarded event */
2826         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2827         event->type_len = RINGBUF_TYPE_PADDING;
2828         /* time delta must be non zero */
2829         if (!event->time_delta)
2830                 event->time_delta = 1;
2831 }
2832
2833 /*
2834  * Decrement the entry count of the page that an event is on.
2835  * The event does not even need to exist, only the pointer
2836  * to the page it is on. This may only be called before the commit
2837  * takes place.
2838  */
2839 static inline void
2840 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2841                    struct ring_buffer_event *event)
2842 {
2843         unsigned long addr = (unsigned long)event;
2844         struct buffer_page *bpage = cpu_buffer->commit_page;
2845         struct buffer_page *start;
2846
2847         addr &= PAGE_MASK;
2848
2849         /* Do the likely case first */
2850         if (likely(bpage->page == (void *)addr)) {
2851                 local_dec(&bpage->entries);
2852                 return;
2853         }
2854
2855         /*
2856          * Because the commit page may be on the reader page, we
2857          * start with the next page and loop until we come back to it.
2858          */
2859         rb_inc_page(cpu_buffer, &bpage);
2860         start = bpage;
2861         do {
2862                 if (bpage->page == (void *)addr) {
2863                         local_dec(&bpage->entries);
2864                         return;
2865                 }
2866                 rb_inc_page(cpu_buffer, &bpage);
2867         } while (bpage != start);
2868
2869         /* commit not part of this buffer?? */
2870         RB_WARN_ON(cpu_buffer, 1);
2871 }
2872
2873 /**
2874  * ring_buffer_commit_discard - discard an event that has not been committed
2875  * @buffer: the ring buffer
2876  * @event: non committed event to discard
2877  *
2878  * Sometimes an event that is in the ring buffer needs to be ignored.
2879  * This function lets the user discard an event in the ring buffer
2880  * so that it will not be read later.
2881  *
2882  * This function only works if it is called before the item has been
2883  * committed. It will try to free the event from the ring buffer
2884  * if another event has not been added behind it.
2885  *
2886  * If another event has been added behind it, it will set the event
2887  * up as discarded, and perform the commit.
2888  *
2889  * If this function is called, do not call ring_buffer_unlock_commit on
2890  * the event.
2891  */
2892 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2893                                 struct ring_buffer_event *event)
2894 {
2895         struct ring_buffer_per_cpu *cpu_buffer;
2896         int cpu;
2897
2898         /* The event is discarded regardless */
2899         rb_event_discard(event);
2900
2901         cpu = smp_processor_id();
2902         cpu_buffer = buffer->buffers[cpu];
2903
2904         /*
2905          * This must only be called if the event has not been
2906          * committed yet. Thus we can assume that preemption
2907          * is still disabled.
2908          */
2909         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2910
2911         rb_decrement_entry(cpu_buffer, event);
2912         if (rb_try_to_discard(cpu_buffer, event))
2913                 goto out;
2914
2915         /*
2916          * The commit is still visible by the reader, so we
2917          * must still update the timestamp.
2918          */
2919         rb_update_write_stamp(cpu_buffer, event);
2920  out:
2921         rb_end_commit(cpu_buffer);
2922
2923         trace_recursive_unlock();
2924
2925         preempt_enable_notrace();
2926
2927 }
2928 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2929
2930 /**
2931  * ring_buffer_write - write data to the buffer without reserving
2932  * @buffer: The ring buffer to write to.
2933  * @length: The length of the data being written (excluding the event header)
2934  * @data: The data to write to the buffer.
2935  *
2936  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2937  * one function. If you already have the data to write to the buffer, it
2938  * may be easier to simply call this function.
2939  *
2940  * Note, like ring_buffer_lock_reserve, the length is the length of the data
2941  * and not the length of the event which would hold the header.
2942  */
2943 int ring_buffer_write(struct ring_buffer *buffer,
2944                       unsigned long length,
2945                       void *data)
2946 {
2947         struct ring_buffer_per_cpu *cpu_buffer;
2948         struct ring_buffer_event *event;
2949         void *body;
2950         int ret = -EBUSY;
2951         int cpu;
2952
2953         if (ring_buffer_flags != RB_BUFFERS_ON)
2954                 return -EBUSY;
2955
2956         preempt_disable_notrace();
2957
2958         if (atomic_read(&buffer->record_disabled))
2959                 goto out;
2960
2961         cpu = raw_smp_processor_id();
2962
2963         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2964                 goto out;
2965
2966         cpu_buffer = buffer->buffers[cpu];
2967
2968         if (atomic_read(&cpu_buffer->record_disabled))
2969                 goto out;
2970
2971         if (length > BUF_MAX_DATA_SIZE)
2972                 goto out;
2973
2974         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2975         if (!event)
2976                 goto out;
2977
2978         body = rb_event_data(event);
2979
2980         memcpy(body, data, length);
2981
2982         rb_commit(cpu_buffer, event);
2983
2984         rb_wakeups(buffer, cpu_buffer);
2985
2986         ret = 0;
2987  out:
2988         preempt_enable_notrace();
2989
2990         return ret;
2991 }
2992 EXPORT_SYMBOL_GPL(ring_buffer_write);
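
/*
 * For reference: ring_buffer_write(buffer, sizeof(val), &val) is the
 * one-call equivalent of the reserve/copy/commit sketch shown after
 * ring_buffer_lock_reserve() above.
 */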
2993
2994 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2995 {
2996         struct buffer_page *reader = cpu_buffer->reader_page;
2997         struct buffer_page *head = rb_set_head_page(cpu_buffer);
2998         struct buffer_page *commit = cpu_buffer->commit_page;
2999
3000         /* In case of error, head will be NULL */
3001         if (unlikely(!head))
3002                 return 1;
3003
3004         return reader->read == rb_page_commit(reader) &&
3005                 (commit == reader ||
3006                  (commit == head &&
3007                   head->read == rb_page_commit(commit)));
3008 }
3009
3010 /**
3011  * ring_buffer_record_disable - stop all writes into the buffer
3012  * @buffer: The ring buffer to stop writes to.
3013  *
3014  * This prevents all writes to the buffer. Any attempt to write
3015  * to the buffer after this will fail and return NULL.
3016  *
3017  * The caller should call synchronize_sched() after this.
3018  */
3019 void ring_buffer_record_disable(struct ring_buffer *buffer)
3020 {
3021         atomic_inc(&buffer->record_disabled);
3022 }
3023 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3024
3025 /**
3026  * ring_buffer_record_enable - enable writes to the buffer
3027  * @buffer: The ring buffer to enable writes
3028  *
3029  * Note, multiple disables will need the same number of enables
3030  * to truly enable the writing (much like preempt_disable).
3031  */
3032 void ring_buffer_record_enable(struct ring_buffer *buffer)
3033 {
3034         atomic_dec(&buffer->record_disabled);
3035 }
3036 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3037
3038 /**
3039  * ring_buffer_record_off - stop all writes into the buffer
3040  * @buffer: The ring buffer to stop writes to.
3041  *
3042  * This prevents all writes to the buffer. Any attempt to write
3043  * to the buffer after this will fail and return NULL.
3044  *
3045  * This is different from ring_buffer_record_disable() as
3046  * it works like an on/off switch, whereas the disable() version
3047  * must be paired with an enable().
3048  */
3049 void ring_buffer_record_off(struct ring_buffer *buffer)
3050 {
3051         unsigned int rd;
3052         unsigned int new_rd;
3053
3054         do {
3055                 rd = atomic_read(&buffer->record_disabled);
3056                 new_rd = rd | RB_BUFFER_OFF;
3057         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3058 }
3059 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3060
3061 /**
3062  * ring_buffer_record_on - restart writes into the buffer
3063  * @buffer: The ring buffer to start writes to.
3064  *
3065  * This enables all writes to the buffer that was disabled by
3066  * ring_buffer_record_off().
3067  *
3068  * This is different from ring_buffer_record_enable() as
3069  * it works like an on/off switch, whereas the enable() version
3070  * must be paired with a disable().
3071  */
3072 void ring_buffer_record_on(struct ring_buffer *buffer)
3073 {
3074         unsigned int rd;
3075         unsigned int new_rd;
3076
3077         do {
3078                 rd = atomic_read(&buffer->record_disabled);
3079                 new_rd = rd & ~RB_BUFFER_OFF;
3080         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3081 }
3082 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3083
3084 /**
3085  * ring_buffer_record_is_on - return true if the ring buffer can write
3086  * @buffer: The ring buffer to see if write is enabled
3087  *
3088  * Returns true if the ring buffer is in a state that it accepts writes.
3089  */
3090 int ring_buffer_record_is_on(struct ring_buffer *buffer)
3091 {
3092         return !atomic_read(&buffer->record_disabled);
3093 }
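
/*
 * Sketch of the two styles side by side (illustrative, not from an
 * in-tree user): the off/on pair acts as a non-nesting switch, while
 * disable/enable must be balanced like preempt_disable/preempt_enable.
 *
 *      ring_buffer_record_off(buffer);
 *      ...                             switch forced off
 *      ring_buffer_record_on(buffer);
 *
 *      ring_buffer_record_disable(buffer);
 *      ...                             nesting counter incremented
 *      ring_buffer_record_enable(buffer);
 *
 *      if (ring_buffer_record_is_on(buffer))
 *              ...                     writes can succeed again
 */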
3094
3095 /**
3096  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3097  * @buffer: The ring buffer to stop writes to.
3098  * @cpu: The CPU buffer to stop
3099  *
3100  * This prevents all writes to the buffer. Any attempt to write
3101  * to the buffer after this will fail and return NULL.
3102  *
3103  * The caller should call synchronize_sched() after this.
3104  */
3105 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3106 {
3107         struct ring_buffer_per_cpu *cpu_buffer;
3108
3109         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3110                 return;
3111
3112         cpu_buffer = buffer->buffers[cpu];
3113         atomic_inc(&cpu_buffer->record_disabled);
3114 }
3115 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3116
3117 /**
3118  * ring_buffer_record_enable_cpu - enable writes to the buffer
3119  * @buffer: The ring buffer to enable writes
3120  * @cpu: The CPU to enable.
3121  *
3122  * Note, multiple disables will need the same number of enables
3123  * to truly enable the writing (much like preempt_disable).
3124  */
3125 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3126 {
3127         struct ring_buffer_per_cpu *cpu_buffer;
3128
3129         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3130                 return;
3131
3132         cpu_buffer = buffer->buffers[cpu];
3133         atomic_dec(&cpu_buffer->record_disabled);
3134 }
3135 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3136
3137 /*
3138  * The total number of entries in the ring buffer is the running
3139  * counter of entries entered into the ring buffer, minus the sum
3140  * of the entries read from the ring buffer and the number of
3141  * entries that were overwritten.
3142  */
3143 static inline unsigned long
3144 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3145 {
3146         return local_read(&cpu_buffer->entries) -
3147                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3148 }
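
/*
 * For example (hypothetical numbers): with entries == 10, overrun == 2
 * and read == 3, rb_num_of_entries() reports 10 - (2 + 3) = 5 events
 * still waiting in the buffer.
 */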
3149
3150 /**
3151  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3152  * @buffer: The ring buffer
3153  * @cpu: The per CPU buffer to read from.
3154  */
3155 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3156 {
3157         unsigned long flags;
3158         struct ring_buffer_per_cpu *cpu_buffer;
3159         struct buffer_page *bpage;
3160         u64 ret = 0;
3161
3162         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3163                 return 0;
3164
3165         cpu_buffer = buffer->buffers[cpu];
3166         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3167         /*
3168          * If the tail is on the reader_page, the oldest time stamp
3169          * is on the reader page.
3170          */
3171         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3172                 bpage = cpu_buffer->reader_page;
3173         else
3174                 bpage = rb_set_head_page(cpu_buffer);
3175         if (bpage)
3176                 ret = bpage->page->time_stamp;
3177         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3178
3179         return ret;
3180 }
3181 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
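
/*
 * Sketch: combined with the current ring buffer clock, this gives a
 * rough age of the oldest data (illustrative; "oldest" and "age" are
 * local names chosen here):
 *
 *      u64 oldest = ring_buffer_oldest_event_ts(buffer, cpu);
 *      u64 age = oldest ? ring_buffer_time_stamp(buffer, cpu) - oldest : 0;
 */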
3182
3183 /**
3184  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3185  * @buffer: The ring buffer
3186  * @cpu: The per CPU buffer to read from.
3187  */
3188 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3189 {
3190         struct ring_buffer_per_cpu *cpu_buffer;
3191         unsigned long ret;
3192
3193         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3194                 return 0;
3195
3196         cpu_buffer = buffer->buffers[cpu];
3197         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3198
3199         return ret;
3200 }
3201 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3202
3203 /**
3204  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3205  * @buffer: The ring buffer
3206  * @cpu: The per CPU buffer to get the entries from.
3207  */
3208 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3209 {
3210         struct ring_buffer_per_cpu *cpu_buffer;
3211
3212         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3213                 return 0;
3214
3215         cpu_buffer = buffer->buffers[cpu];
3216
3217         return rb_num_of_entries(cpu_buffer);
3218 }
3219 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3220
3221 /**
3222  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3223  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3224  * @buffer: The ring buffer
3225  * @cpu: The per CPU buffer to get the number of overruns from
3226  */
3227 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3228 {
3229         struct ring_buffer_per_cpu *cpu_buffer;
3230         unsigned long ret;
3231
3232         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3233                 return 0;
3234
3235         cpu_buffer = buffer->buffers[cpu];
3236         ret = local_read(&cpu_buffer->overrun);
3237
3238         return ret;
3239 }
3240 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3241
3242 /**
3243  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3244  * commits failing due to the buffer wrapping around while there are uncommitted
3245  * events, such as during an interrupt storm.
3246  * @buffer: The ring buffer
3247  * @cpu: The per CPU buffer to get the number of overruns from
3248  */
3249 unsigned long
3250 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3251 {
3252         struct ring_buffer_per_cpu *cpu_buffer;
3253         unsigned long ret;
3254
3255         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3256                 return 0;
3257
3258         cpu_buffer = buffer->buffers[cpu];
3259         ret = local_read(&cpu_buffer->commit_overrun);
3260
3261         return ret;
3262 }
3263 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3264
3265 /**
3266  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3267  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3268  * @buffer: The ring buffer
3269  * @cpu: The per CPU buffer to get the number of overruns from
3270  */
3271 unsigned long
3272 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3273 {
3274         struct ring_buffer_per_cpu *cpu_buffer;
3275         unsigned long ret;
3276
3277         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3278                 return 0;
3279
3280         cpu_buffer = buffer->buffers[cpu];
3281         ret = local_read(&cpu_buffer->dropped_events);
3282
3283         return ret;
3284 }
3285 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3286
3287 /**
3288  * ring_buffer_read_events_cpu - get the number of events successfully read
3289  * @buffer: The ring buffer
3290  * @cpu: The per CPU buffer to get the number of events read
3291  */
3292 unsigned long
3293 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3294 {
3295         struct ring_buffer_per_cpu *cpu_buffer;
3296
3297         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3298                 return 0;
3299
3300         cpu_buffer = buffer->buffers[cpu];
3301         return cpu_buffer->read;
3302 }
3303 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3304
3305 /**
3306  * ring_buffer_entries - get the number of entries in a buffer
3307  * @buffer: The ring buffer
3308  *
3309  * Returns the total number of entries in the ring buffer
3310  * (all CPU entries)
3311  */
3312 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3313 {
3314         struct ring_buffer_per_cpu *cpu_buffer;
3315         unsigned long entries = 0;
3316         int cpu;
3317
3318         /* if you care about this being correct, lock the buffer */
3319         for_each_buffer_cpu(buffer, cpu) {
3320                 cpu_buffer = buffer->buffers[cpu];
3321                 entries += rb_num_of_entries(cpu_buffer);
3322         }
3323
3324         return entries;
3325 }
3326 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3327
3328 /**
3329  * ring_buffer_overruns - get the number of overruns in buffer
3330  * @buffer: The ring buffer
3331  *
3332  * Returns the total number of overruns in the ring buffer
3333  * (all CPU entries)
3334  */
3335 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3336 {
3337         struct ring_buffer_per_cpu *cpu_buffer;
3338         unsigned long overruns = 0;
3339         int cpu;
3340
3341         /* if you care about this being correct, lock the buffer */
3342         for_each_buffer_cpu(buffer, cpu) {
3343                 cpu_buffer = buffer->buffers[cpu];
3344                 overruns += local_read(&cpu_buffer->overrun);
3345         }
3346
3347         return overruns;
3348 }
3349 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
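
/*
 * The per-cpu counters above combine into a simple statistics dump,
 * sketched here (the pr_info formatting is illustrative):
 *
 *      int cpu;
 *
 *      for_each_online_cpu(cpu)
 *              pr_info("cpu%d: %lu entries, %lu overruns, %lu read\n",
 *                      cpu,
 *                      ring_buffer_entries_cpu(buffer, cpu),
 *                      ring_buffer_overrun_cpu(buffer, cpu),
 *                      ring_buffer_read_events_cpu(buffer, cpu));
 */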
3350
3351 static void rb_iter_reset(struct ring_buffer_iter *iter)
3352 {
3353         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3354
3355         /* Iterator usage is expected to have record disabled */
3356         if (list_empty(&cpu_buffer->reader_page->list)) {
3357                 iter->head_page = rb_set_head_page(cpu_buffer);
3358                 if (unlikely(!iter->head_page))
3359                         return;
3360                 iter->head = iter->head_page->read;
3361         } else {
3362                 iter->head_page = cpu_buffer->reader_page;
3363                 iter->head = cpu_buffer->reader_page->read;
3364         }
3365         if (iter->head)
3366                 iter->read_stamp = cpu_buffer->read_stamp;
3367         else
3368                 iter->read_stamp = iter->head_page->page->time_stamp;
3369         iter->cache_reader_page = cpu_buffer->reader_page;
3370         iter->cache_read = cpu_buffer->read;
3371 }
3372
3373 /**
3374  * ring_buffer_iter_reset - reset an iterator
3375  * @iter: The iterator to reset
3376  *
3377  * Resets the iterator, so that it will start from the beginning
3378  * again.
3379  */
3380 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3381 {
3382         struct ring_buffer_per_cpu *cpu_buffer;
3383         unsigned long flags;
3384
3385         if (!iter)
3386                 return;
3387
3388         cpu_buffer = iter->cpu_buffer;
3389
3390         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3391         rb_iter_reset(iter);
3392         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3393 }
3394 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3395
3396 /**
3397  * ring_buffer_iter_empty - check if an iterator has no more to read
3398  * @iter: The iterator to check
3399  */
3400 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3401 {
3402         struct ring_buffer_per_cpu *cpu_buffer;
3403
3404         cpu_buffer = iter->cpu_buffer;
3405
3406         return iter->head_page == cpu_buffer->commit_page &&
3407                 iter->head == rb_commit_index(cpu_buffer);
3408 }
3409 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3410
3411 static void
3412 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3413                      struct ring_buffer_event *event)
3414 {
3415         u64 delta;
3416
3417         switch (event->type_len) {
3418         case RINGBUF_TYPE_PADDING:
3419                 return;
3420
3421         case RINGBUF_TYPE_TIME_EXTEND:
3422                 delta = event->array[0];
3423                 delta <<= TS_SHIFT;
3424                 delta += event->time_delta;
3425                 cpu_buffer->read_stamp += delta;
3426                 return;
3427
3428         case RINGBUF_TYPE_TIME_STAMP:
3429                 /* FIXME: not implemented */
3430                 return;
3431
3432         case RINGBUF_TYPE_DATA:
3433                 cpu_buffer->read_stamp += event->time_delta;
3434                 return;
3435
3436         default:
3437                 BUG();
3438         }
3439         return;
3440 }
3441
3442 static void
3443 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3444                           struct ring_buffer_event *event)
3445 {
3446         u64 delta;
3447
3448         switch (event->type_len) {
3449         case RINGBUF_TYPE_PADDING:
3450                 return;
3451
3452         case RINGBUF_TYPE_TIME_EXTEND:
3453                 delta = event->array[0];
3454                 delta <<= TS_SHIFT;
3455                 delta += event->time_delta;
3456                 iter->read_stamp += delta;
3457                 return;
3458
3459         case RINGBUF_TYPE_TIME_STAMP:
3460                 /* FIXME: not implemented */
3461                 return;
3462
3463         case RINGBUF_TYPE_DATA:
3464                 iter->read_stamp += event->time_delta;
3465                 return;
3466
3467         default:
3468                 BUG();
3469         }
3470         return;
3471 }
3472
3473 static struct buffer_page *
3474 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3475 {
3476         struct buffer_page *reader = NULL;
3477         unsigned long overwrite;
3478         unsigned long flags;
3479         int nr_loops = 0;
3480         int ret;
3481
3482         local_irq_save(flags);
3483         arch_spin_lock(&cpu_buffer->lock);
3484
3485  again:
3486         /*
3487          * This should normally only loop twice. But because the
3488          * start of the reader inserts an empty page, it causes
3489          * a case where we will loop three times. There should be no
3490          * reason to loop four times (that I know of).
3491          */
3492         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3493                 reader = NULL;
3494                 goto out;
3495         }
3496
3497         reader = cpu_buffer->reader_page;
3498
3499         /* If there's more to read, return this page */
3500         if (cpu_buffer->reader_page->read < rb_page_size(reader))
3501                 goto out;
3502
3503         /* Never should we have an index greater than the size */
3504         if (RB_WARN_ON(cpu_buffer,
3505                        cpu_buffer->reader_page->read > rb_page_size(reader)))
3506                 goto out;
3507
3508         /* check if we caught up to the tail */
3509         reader = NULL;
3510         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3511                 goto out;
3512
3513         /* Don't bother swapping if the ring buffer is empty */
3514         if (rb_num_of_entries(cpu_buffer) == 0)
3515                 goto out;
3516
3517         /*
3518          * Reset the reader page to size zero.
3519          */
3520         local_set(&cpu_buffer->reader_page->write, 0);
3521         local_set(&cpu_buffer->reader_page->entries, 0);
3522         local_set(&cpu_buffer->reader_page->page->commit, 0);
3523         cpu_buffer->reader_page->real_end = 0;
3524
3525  spin:
3526         /*
3527          * Splice the empty reader page into the list around the head.
3528          */
3529         reader = rb_set_head_page(cpu_buffer);
3530         if (!reader)
3531                 goto out;
3532         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3533         cpu_buffer->reader_page->list.prev = reader->list.prev;
3534
3535         /*
3536          * cpu_buffer->pages just needs to point to the buffer, it
3537          *  has no specific buffer page to point to. Let's move it out
3538          *  of our way so we don't accidentally swap it.
3539          */
3540         cpu_buffer->pages = reader->list.prev;
3541
3542         /* The reader page will be pointing to the new head */
3543         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3544
3545         /*
3546          * We want to make sure we read the overruns after we set up our
3547          * pointers to the next object. The writer side does a
3548          * cmpxchg to cross pages which acts as the mb on the writer
3549          * side. Note, the reader will constantly fail the swap
3550          * while the writer is updating the pointers, so this
3551          * guarantees that the overwrite recorded here is the one we
3552          * want to compare with the last_overrun.
3553          */
3554         smp_mb();
3555         overwrite = local_read(&(cpu_buffer->overrun));
3556
3557         /*
3558          * Here's the tricky part.
3559          *
3560          * We need to move the pointer past the header page.
3561          * But we can only do that if a writer is not currently
3562          * moving it. The page before the header page has the
3563          * flag bit '1' set if it is pointing to the page we want,
3564          * but if the writer is in the process of moving it
3565          * then it will be '2', or '0' if it has already moved.
3566          */
3567
3568         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3569
3570         /*
3571          * If we did not convert it, then we must try again.
3572          */
3573         if (!ret)
3574                 goto spin;
3575
3576         /*
3577          * Yeah! We succeeded in replacing the page.
3578          *
3579          * Now make the new head point back to the reader page.
3580          */
3581         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3582         rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3583
3584         /* Finally update the reader page to the new head */
3585         cpu_buffer->reader_page = reader;
3586         rb_reset_reader_page(cpu_buffer);
3587
3588         if (overwrite != cpu_buffer->last_overrun) {
3589                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3590                 cpu_buffer->last_overrun = overwrite;
3591         }
3592
3593         goto again;
3594
3595  out:
3596         arch_spin_unlock(&cpu_buffer->lock);
3597         local_irq_restore(flags);
3598
3599         return reader;
3600 }
3601
3602 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3603 {
3604         struct ring_buffer_event *event;
3605         struct buffer_page *reader;
3606         unsigned length;
3607
3608         reader = rb_get_reader_page(cpu_buffer);
3609
3610         /* This function should not be called when buffer is empty */
3611         if (RB_WARN_ON(cpu_buffer, !reader))
3612                 return;
3613
3614         event = rb_reader_event(cpu_buffer);
3615
3616         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3617                 cpu_buffer->read++;
3618
3619         rb_update_read_stamp(cpu_buffer, event);
3620
3621         length = rb_event_length(event);
3622         cpu_buffer->reader_page->read += length;
3623 }
3624
3625 static void rb_advance_iter(struct ring_buffer_iter *iter)
3626 {
3627         struct ring_buffer_per_cpu *cpu_buffer;
3628         struct ring_buffer_event *event;
3629         unsigned length;
3630
3631         cpu_buffer = iter->cpu_buffer;
3632
3633         /*
3634          * Check if we are at the end of the buffer.
3635          */
3636         if (iter->head >= rb_page_size(iter->head_page)) {
3637                 /* discarded commits can make the page empty */
3638                 if (iter->head_page == cpu_buffer->commit_page)
3639                         return;
3640                 rb_inc_iter(iter);
3641                 return;
3642         }
3643
3644         event = rb_iter_head_event(iter);
3645
3646         length = rb_event_length(event);
3647
3648         /*
3649          * This should not be called to advance the header if we are
3650          * at the tail of the buffer.
3651          */
3652         if (RB_WARN_ON(cpu_buffer,
3653                        (iter->head_page == cpu_buffer->commit_page) &&
3654                        (iter->head + length > rb_commit_index(cpu_buffer))))
3655                 return;
3656
3657         rb_update_iter_read_stamp(iter, event);
3658
3659         iter->head += length;
3660
3661         /* check for end of page padding */
3662         if ((iter->head >= rb_page_size(iter->head_page)) &&
3663             (iter->head_page != cpu_buffer->commit_page))
3664                 rb_inc_iter(iter);
3665 }
3666
3667 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3668 {
3669         return cpu_buffer->lost_events;
3670 }
3671
3672 static struct ring_buffer_event *
3673 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3674                unsigned long *lost_events)
3675 {
3676         struct ring_buffer_event *event;
3677         struct buffer_page *reader;
3678         int nr_loops = 0;
3679
3680  again:
3681         /*
3682          * We repeat when a time extend is encountered.
3683          * Since the time extend is always attached to a data event,
3684          * we should never loop more than once.
3685          * (We never hit the following condition more than twice).
3686          */
3687         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3688                 return NULL;
3689
3690         reader = rb_get_reader_page(cpu_buffer);
3691         if (!reader)
3692                 return NULL;
3693
3694         event = rb_reader_event(cpu_buffer);
3695
3696         switch (event->type_len) {
3697         case RINGBUF_TYPE_PADDING:
3698                 if (rb_null_event(event))
3699                         RB_WARN_ON(cpu_buffer, 1);
3700                 /*
3701                  * Because the writer could be discarding every
3702                  * event it creates (which would probably be bad)
3703                  * if we were to go back to "again" then we may never
3704                  * catch up, and will trigger the warn on, or lock
3705                  * the box. Return the padding, and we will release
3706                  * the current locks, and try again.
3707                  */
3708                 return event;
3709
3710         case RINGBUF_TYPE_TIME_EXTEND:
3711                 /* Internal data, OK to advance */
3712                 rb_advance_reader(cpu_buffer);
3713                 goto again;
3714
3715         case RINGBUF_TYPE_TIME_STAMP:
3716                 /* FIXME: not implemented */
3717                 rb_advance_reader(cpu_buffer);
3718                 goto again;
3719
3720         case RINGBUF_TYPE_DATA:
3721                 if (ts) {
3722                         *ts = cpu_buffer->read_stamp + event->time_delta;
3723                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3724                                                          cpu_buffer->cpu, ts);
3725                 }
3726                 if (lost_events)
3727                         *lost_events = rb_lost_events(cpu_buffer);
3728                 return event;
3729
3730         default:
3731                 BUG();
3732         }
3733
3734         return NULL;
3735 }
3736 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3737
3738 static struct ring_buffer_event *
3739 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3740 {
3741         struct ring_buffer *buffer;
3742         struct ring_buffer_per_cpu *cpu_buffer;
3743         struct ring_buffer_event *event;
3744         int nr_loops = 0;
3745
3746         cpu_buffer = iter->cpu_buffer;
3747         buffer = cpu_buffer->buffer;
3748
3749         /*
3750          * Check if someone performed a consuming read to
3751          * the buffer. A consuming read invalidates the iterator
3752          * and we need to reset the iterator in this case.
3753          */
3754         if (unlikely(iter->cache_read != cpu_buffer->read ||
3755                      iter->cache_reader_page != cpu_buffer->reader_page))
3756                 rb_iter_reset(iter);
3757
3758  again:
3759         if (ring_buffer_iter_empty(iter))
3760                 return NULL;
3761
3762         /*
3763          * We repeat when a time extend is encountered.
3764          * Since the time extend is always attached to a data event,
3765          * we should never loop more than once.
3766          * (We never hit the following condition more than twice).
3767          */
3768         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3769                 return NULL;
3770
3771         if (rb_per_cpu_empty(cpu_buffer))
3772                 return NULL;
3773
3774         if (iter->head >= local_read(&iter->head_page->page->commit)) {
3775                 rb_inc_iter(iter);
3776                 goto again;
3777         }
3778
3779         event = rb_iter_head_event(iter);
3780
3781         switch (event->type_len) {
3782         case RINGBUF_TYPE_PADDING:
3783                 if (rb_null_event(event)) {
3784                         rb_inc_iter(iter);
3785                         goto again;
3786                 }
3787                 rb_advance_iter(iter);
3788                 return event;
3789
3790         case RINGBUF_TYPE_TIME_EXTEND:
3791                 /* Internal data, OK to advance */
3792                 rb_advance_iter(iter);
3793                 goto again;
3794
3795         case RINGBUF_TYPE_TIME_STAMP:
3796                 /* FIXME: not implemented */
3797                 rb_advance_iter(iter);
3798                 goto again;
3799
3800         case RINGBUF_TYPE_DATA:
3801                 if (ts) {
3802                         *ts = iter->read_stamp + event->time_delta;
3803                         ring_buffer_normalize_time_stamp(buffer,
3804                                                          cpu_buffer->cpu, ts);
3805                 }
3806                 return event;
3807
3808         default:
3809                 BUG();
3810         }
3811
3812         return NULL;
3813 }
3814 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3815
3816 static inline int rb_ok_to_lock(void)
3817 {
3818         /*
3819          * If an NMI die dump is writing out the content of the ring
3820          * buffer, do not grab locks. We also permanently disable the
3821          * ring buffer. A one-time deal is all you get from reading
3822          * the ring buffer from an NMI.
3823          */
3824         if (likely(!in_nmi()))
3825                 return 1;
3826
3827         tracing_off_permanent();
3828         return 0;
3829 }
3830
3831 /**
3832  * ring_buffer_peek - peek at the next event to be read
3833  * @buffer: The ring buffer to read
3834  * @cpu: The cpu to peek at
3835  * @ts: The timestamp counter of this event.
3836  * @lost_events: a variable to store if events were lost (may be NULL)
3837  *
3838  * This will return the event that will be read next, but does
3839  * not consume the data.
3840  */
3841 struct ring_buffer_event *
3842 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3843                  unsigned long *lost_events)
3844 {
3845         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3846         struct ring_buffer_event *event;
3847         unsigned long flags;
3848         int dolock;
3849
3850         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3851                 return NULL;
3852
3853         dolock = rb_ok_to_lock();
3854  again:
3855         local_irq_save(flags);
3856         if (dolock)
3857                 raw_spin_lock(&cpu_buffer->reader_lock);
3858         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3859         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3860                 rb_advance_reader(cpu_buffer);
3861         if (dolock)
3862                 raw_spin_unlock(&cpu_buffer->reader_lock);
3863         local_irq_restore(flags);
3864
3865         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3866                 goto again;
3867
3868         return event;
3869 }
3870
3871 /**
3872  * ring_buffer_iter_peek - peek at the next event to be read
3873  * @iter: The ring buffer iterator
3874  * @ts: The timestamp counter of this event.
3875  *
3876  * This will return the event that will be read next, but does
3877  * not increment the iterator.
3878  */
3879 struct ring_buffer_event *
3880 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3881 {
3882         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3883         struct ring_buffer_event *event;
3884         unsigned long flags;
3885
3886  again:
3887         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3888         event = rb_iter_peek(iter, ts);
3889         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3890
3891         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3892                 goto again;
3893
3894         return event;
3895 }
3896
3897 /**
3898  * ring_buffer_consume - return an event and consume it
3899  * @buffer: The ring buffer to get the next event from
3900  * @cpu: the cpu to read the buffer from
3901  * @ts: a variable to store the timestamp (may be NULL)
3902  * @lost_events: a variable to store if events were lost (may be NULL)
3903  *
3904  * Returns the next event in the ring buffer, and that event is consumed.
3905  * Meaning that sequential reads will keep returning a different event,
3906  * and will eventually empty the ring buffer if the producer is slower.
3907  */
3908 struct ring_buffer_event *
3909 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3910                     unsigned long *lost_events)
3911 {
3912         struct ring_buffer_per_cpu *cpu_buffer;
3913         struct ring_buffer_event *event = NULL;
3914         unsigned long flags;
3915         int dolock;
3916
3917         dolock = rb_ok_to_lock();
3918
3919  again:
3920         /* might be called in atomic */
3921         preempt_disable();
3922
3923         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3924                 goto out;
3925
3926         cpu_buffer = buffer->buffers[cpu];
3927         local_irq_save(flags);
3928         if (dolock)
3929                 raw_spin_lock(&cpu_buffer->reader_lock);
3930
3931         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3932         if (event) {
3933                 cpu_buffer->lost_events = 0;
3934                 rb_advance_reader(cpu_buffer);
3935         }
3936
3937         if (dolock)
3938                 raw_spin_unlock(&cpu_buffer->reader_lock);
3939         local_irq_restore(flags);
3940
3941  out:
3942         preempt_enable();
3943
3944         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3945                 goto again;
3946
3947         return event;
3948 }
3949 EXPORT_SYMBOL_GPL(ring_buffer_consume);
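
/*
 * Typical consuming-read loop, as a sketch (process() stands in for
 * caller code and is not defined here):
 *
 *      struct ring_buffer_event *event;
 *      unsigned long lost;
 *      u64 ts;
 *
 *      while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *              if (lost)
 *                      ...     account for dropped events
 *              process(ring_buffer_event_data(event), ts);
 *      }
 */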
3950
3951 /**
3952  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3953  * @buffer: The ring buffer to read from
3954  * @cpu: The cpu buffer to iterate over
3955  *
3956  * This performs the initial preparations necessary to iterate
3957  * through the buffer.  Memory is allocated, buffer recording
3958  * is disabled, and the iterator pointer is returned to the caller.
3959  *
3960  * Disabling buffer recording prevents the reading from being
3961  * corrupted. This is not a consuming read, so a producer is not
3962  * expected.
3963  *
3964  * After a sequence of ring_buffer_read_prepare calls, the user is
3965  * expected to make at least one call to ring_buffer_prepare_sync.
3966  * Afterwards, ring_buffer_read_start is invoked to get things going
3967  * for real.
3968  *
3969  * This overall must be paired with ring_buffer_read_finish.
3970  */
3971 struct ring_buffer_iter *
3972 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3973 {
3974         struct ring_buffer_per_cpu *cpu_buffer;
3975         struct ring_buffer_iter *iter;
3976
3977         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3978                 return NULL;
3979
3980         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3981         if (!iter)
3982                 return NULL;
3983
3984         cpu_buffer = buffer->buffers[cpu];
3985
3986         iter->cpu_buffer = cpu_buffer;
3987
3988         atomic_inc(&buffer->resize_disabled);
3989         atomic_inc(&cpu_buffer->record_disabled);
3990
3991         return iter;
3992 }
3993 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3994
3995 /**
3996  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3997  *
3998  * All previously invoked ring_buffer_read_prepare calls to prepare
3999  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4000  * calls on those iterators are allowed.
4001  */
4002 void
4003 ring_buffer_read_prepare_sync(void)
4004 {
4005         synchronize_sched();
4006 }
4007 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4008
4009 /**
4010  * ring_buffer_read_start - start a non consuming read of the buffer
4011  * @iter: The iterator returned by ring_buffer_read_prepare
4012  *
4013  * This finalizes the startup of an iteration through the buffer.
4014  * The iterator comes from a call to ring_buffer_read_prepare and
4015  * an intervening ring_buffer_read_prepare_sync must have been
4016  * performed.
4017  *
4018  * Must be paired with ring_buffer_read_finish.
4019  */
4020 void
4021 ring_buffer_read_start(struct ring_buffer_iter *iter)
4022 {
4023         struct ring_buffer_per_cpu *cpu_buffer;
4024         unsigned long flags;
4025
4026         if (!iter)
4027                 return;
4028
4029         cpu_buffer = iter->cpu_buffer;
4030
4031         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4032         arch_spin_lock(&cpu_buffer->lock);
4033         rb_iter_reset(iter);
4034         arch_spin_unlock(&cpu_buffer->lock);
4035         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4036 }
4037 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4038
4039 /**
4040  * ring_buffer_read_finish - finish reading the iterator of the buffer
4041  * @iter: The iterator retrieved by ring_buffer_read_prepare
4042  *
4043  * This re-enables the recording to the buffer, and frees the
4044  * iterator.
4045  */
4046 void
4047 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4048 {
4049         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4050         unsigned long flags;
4051
4052         /*
4053          * Ring buffer is disabled from recording, here's a good place
4054          * to check the integrity of the ring buffer.
4055          * Must prevent readers from trying to read, as the check
4056          * clears the HEAD page and readers require it.
4057          */
4058         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4059         rb_check_pages(cpu_buffer);
4060         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4061
4062         atomic_dec(&cpu_buffer->record_disabled);
4063         atomic_dec(&cpu_buffer->buffer->resize_disabled);
4064         kfree(iter);
4065 }
4066 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4067
4068 /**
4069  * ring_buffer_read - read the next item in the ring buffer by the iterator
4070  * @iter: The ring buffer iterator
4071  * @ts: The time stamp of the event read.
4072  *
4073  * This reads the next event in the ring buffer and increments the iterator.
4074  */
4075 struct ring_buffer_event *
4076 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4077 {
4078         struct ring_buffer_event *event;
4079         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4080         unsigned long flags;
4081
4082         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4083  again:
4084         event = rb_iter_peek(iter, ts);
4085         if (!event)
4086                 goto out;
4087
4088         if (event->type_len == RINGBUF_TYPE_PADDING)
4089                 goto again;
4090
4091         rb_advance_iter(iter);
4092  out:
4093         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4094
4095         return event;
4096 }
4097 EXPORT_SYMBOL_GPL(ring_buffer_read);
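
/*
 * The complete non-consuming sequence, sketched (error handling
 * trimmed; use() stands in for caller code):
 *
 *      struct ring_buffer_iter *iter;
 *      struct ring_buffer_event *event;
 *      u64 ts;
 *
 *      iter = ring_buffer_read_prepare(buffer, cpu);
 *      ring_buffer_read_prepare_sync();
 *      ring_buffer_read_start(iter);
 *
 *      while ((event = ring_buffer_read(iter, &ts)))
 *              use(ring_buffer_event_data(event), ts);
 *
 *      ring_buffer_read_finish(iter);
 */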
4098
4099 /**
4100  * ring_buffer_size - return the size of the ring buffer (in bytes)
4101  * @buffer: The ring buffer.
 * @cpu: The CPU to get the ring buffer size of.
4102  */
4103 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4104 {
4105         /*
4106          * Earlier, this method returned
4107          *      BUF_PAGE_SIZE * buffer->nr_pages
4108          * Since the nr_pages field is now removed, we have converted this to
4109          * return the per cpu buffer value.
4110          */
4111         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4112                 return 0;
4113
4114         return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4115 }
4116 EXPORT_SYMBOL_GPL(ring_buffer_size);
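
/*
 * Since the value is per cpu, a total footprint is summed by the
 * caller; a sketch:
 *
 *      unsigned long total = 0;
 *      int cpu;
 *
 *      for_each_online_cpu(cpu)
 *              total += ring_buffer_size(buffer, cpu);
 */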
4117
4118 static void
4119 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4120 {
4121         rb_head_page_deactivate(cpu_buffer);
4122
4123         cpu_buffer->head_page
4124                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4125         local_set(&cpu_buffer->head_page->write, 0);
4126         local_set(&cpu_buffer->head_page->entries, 0);
4127         local_set(&cpu_buffer->head_page->page->commit, 0);
4128
4129         cpu_buffer->head_page->read = 0;
4130
4131         cpu_buffer->tail_page = cpu_buffer->head_page;
4132         cpu_buffer->commit_page = cpu_buffer->head_page;
4133
4134         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4135         INIT_LIST_HEAD(&cpu_buffer->new_pages);
4136         local_set(&cpu_buffer->reader_page->write, 0);
4137         local_set(&cpu_buffer->reader_page->entries, 0);
4138         local_set(&cpu_buffer->reader_page->page->commit, 0);
4139         cpu_buffer->reader_page->read = 0;
4140
4141         local_set(&cpu_buffer->entries_bytes, 0);
4142         local_set(&cpu_buffer->overrun, 0);
4143         local_set(&cpu_buffer->commit_overrun, 0);
4144         local_set(&cpu_buffer->dropped_events, 0);
4145         local_set(&cpu_buffer->entries, 0);
4146         local_set(&cpu_buffer->committing, 0);
4147         local_set(&cpu_buffer->commits, 0);
4148         cpu_buffer->read = 0;
4149         cpu_buffer->read_bytes = 0;
4150
4151         cpu_buffer->write_stamp = 0;
4152         cpu_buffer->read_stamp = 0;
4153
4154         cpu_buffer->lost_events = 0;
4155         cpu_buffer->last_overrun = 0;
4156
4157         rb_head_page_activate(cpu_buffer);
4158 }
4159
4160 /**
4161  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4162  * @buffer: The ring buffer to reset a per cpu buffer of
4163  * @cpu: The CPU buffer to be reset
4164  */
4165 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4166 {
4167         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4168         unsigned long flags;
4169
4170         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4171                 return;
4172
4173         atomic_inc(&buffer->resize_disabled);
4174         atomic_inc(&cpu_buffer->record_disabled);
4175
4176         /* Make sure all commits have finished */
4177         synchronize_sched();
4178
4179         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4180
4181         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4182                 goto out;
4183
4184         arch_spin_lock(&cpu_buffer->lock);
4185
4186         rb_reset_cpu(cpu_buffer);
4187
4188         arch_spin_unlock(&cpu_buffer->lock);
4189
4190  out:
4191         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4192
4193         atomic_dec(&cpu_buffer->record_disabled);
4194         atomic_dec(&buffer->resize_disabled);
4195 }
4196 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4197
4198 /**
4199  * ring_buffer_reset - reset a ring buffer
4200  * @buffer: The ring buffer to reset all cpu buffers
4201  */
4202 void ring_buffer_reset(struct ring_buffer *buffer)
4203 {
4204         int cpu;
4205
4206         for_each_buffer_cpu(buffer, cpu)
4207                 ring_buffer_reset_cpu(buffer, cpu);
4208 }
4209 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4210
4211 /**
4212  * ring_buffer_empty - is the ring buffer empty?
4213  * @buffer: The ring buffer to test
4214  */
4215 int ring_buffer_empty(struct ring_buffer *buffer)
4216 {
4217         struct ring_buffer_per_cpu *cpu_buffer;
4218         unsigned long flags;
4219         int dolock;
4220         int cpu;
4221         int ret;
4222
4223         dolock = rb_ok_to_lock();
4224
4225         /* yes this is racy, but if you don't like the race, lock the buffer */
4226         for_each_buffer_cpu(buffer, cpu) {
4227                 cpu_buffer = buffer->buffers[cpu];
4228                 local_irq_save(flags);
4229                 if (dolock)
4230                         raw_spin_lock(&cpu_buffer->reader_lock);
4231                 ret = rb_per_cpu_empty(cpu_buffer);
4232                 if (dolock)
4233                         raw_spin_unlock(&cpu_buffer->reader_lock);
4234                 local_irq_restore(flags);
4235
4236                 if (!ret)
4237                         return 0;
4238         }
4239
4240         return 1;
4241 }
4242 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4243
4244 /**
4245  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4246  * @buffer: The ring buffer
4247  * @cpu: The CPU buffer to test
4248  */
4249 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4250 {
4251         struct ring_buffer_per_cpu *cpu_buffer;
4252         unsigned long flags;
4253         int dolock;
4254         int ret;
4255
4256         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4257                 return 1;
4258
4259         dolock = rb_ok_to_lock();
4260
4261         cpu_buffer = buffer->buffers[cpu];
4262         local_irq_save(flags);
4263         if (dolock)
4264                 raw_spin_lock(&cpu_buffer->reader_lock);
4265         ret = rb_per_cpu_empty(cpu_buffer);
4266         if (dolock)
4267                 raw_spin_unlock(&cpu_buffer->reader_lock);
4268         local_irq_restore(flags);
4269
4270         return ret;
4271 }
4272 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4273
4274 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4275 /**
4276  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4277  * @buffer_a: One buffer to swap with
4278  * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
4279  *
4280  * This function is useful for tracers that want to take a "snapshot"
4281  * of a CPU buffer and have another backup buffer lying around.
4282  * It is expected that the tracer handles the cpu buffer not being
4283  * used at the moment.
4284  */
4285 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4286                          struct ring_buffer *buffer_b, int cpu)
4287 {
4288         struct ring_buffer_per_cpu *cpu_buffer_a;
4289         struct ring_buffer_per_cpu *cpu_buffer_b;
4290         int ret = -EINVAL;
4291
4292         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4293             !cpumask_test_cpu(cpu, buffer_b->cpumask))
4294                 goto out;
4295
4296         cpu_buffer_a = buffer_a->buffers[cpu];
4297         cpu_buffer_b = buffer_b->buffers[cpu];
4298
4299         /* At least make sure the two buffers are somewhat the same */
4300         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4301                 goto out;
4302
4303         ret = -EAGAIN;
4304
4305         if (ring_buffer_flags != RB_BUFFERS_ON)
4306                 goto out;
4307
4308         if (atomic_read(&buffer_a->record_disabled))
4309                 goto out;
4310
4311         if (atomic_read(&buffer_b->record_disabled))
4312                 goto out;
4313
4314         if (atomic_read(&cpu_buffer_a->record_disabled))
4315                 goto out;
4316
4317         if (atomic_read(&cpu_buffer_b->record_disabled))
4318                 goto out;
4319
4320         /*
4321          * We can't do a synchronize_sched here because this
4322          * function can be called in atomic context.
4323          * Normally this will be called from the same CPU as cpu.
4324          * If not it's up to the caller to protect this.
4325          */
4326         atomic_inc(&cpu_buffer_a->record_disabled);
4327         atomic_inc(&cpu_buffer_b->record_disabled);
4328
4329         ret = -EBUSY;
4330         if (local_read(&cpu_buffer_a->committing))
4331                 goto out_dec;
4332         if (local_read(&cpu_buffer_b->committing))
4333                 goto out_dec;
4334
4335         buffer_a->buffers[cpu] = cpu_buffer_b;
4336         buffer_b->buffers[cpu] = cpu_buffer_a;
4337
4338         cpu_buffer_b->buffer = buffer_a;
4339         cpu_buffer_a->buffer = buffer_b;
4340
4341         ret = 0;
4342
4343 out_dec:
4344         atomic_dec(&cpu_buffer_a->record_disabled);
4345         atomic_dec(&cpu_buffer_b->record_disabled);
4346 out:
4347         return ret;
4348 }
4349 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
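
/*
 * Snapshot sketch (illustrative; "main" and "snap" are two buffers the
 * caller allocated with the same per-cpu page count):
 *
 *      if (ring_buffer_swap_cpu(main, snap, cpu) == 0)
 *              ...     snap now holds what main recorded on cpu
 */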
4350 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4351
4352 /**
4353  * ring_buffer_alloc_read_page - allocate a page to read from buffer
4354  * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate a read page for.
4355  *
4356  * This function is used in conjunction with ring_buffer_read_page.
4357  * When reading a full page from the ring buffer, these functions
4358  * can be used to speed up the process. The calling function should
4359  * allocate a few pages first with this function. Then when it
4360  * needs to get pages from the ring buffer, it passes the result
4361  * of this function into ring_buffer_read_page, which will swap
4362  * the page that was allocated, with the read page of the buffer.
4363  *
4364  * Returns:
4365  *  The page allocated, or NULL on error.
4366  */
4367 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4368 {
4369         struct buffer_data_page *bpage;
4370         struct page *page;
4371
4372         page = alloc_pages_node(cpu_to_node(cpu),
4373                                 GFP_KERNEL | __GFP_NORETRY, 0);
4374         if (!page)
4375                 return NULL;
4376
4377         bpage = page_address(page);
4378
4379         rb_init_page(bpage);
4380
4381         return bpage;
4382 }
4383 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4384
4385 /**
4386  * ring_buffer_free_read_page - free an allocated read page
4387  * @buffer: the buffer the page was allocated for
4388  * @data: the page to free
4389  *
4390  * Free a page allocated from ring_buffer_alloc_read_page.
4391  */
4392 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4393 {
4394         free_page((unsigned long)data);
4395 }
4396 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4397
4398 /**
4399  * ring_buffer_read_page - extract a page from the ring buffer
4400  * @buffer: buffer to extract from
4401  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4402  * @len: amount to extract
4403  * @cpu: the cpu of the buffer to extract
4404  * @full: should the extraction only happen when the page is full.
4405  *
4406  * This function will pull out a page from the ring buffer and consume it.
4407  * @data_page must be the address of the variable that was returned
4408  * from ring_buffer_alloc_read_page. This is because the page might be used
4409  * to swap with a page in the ring buffer.
4410  *
4411  * for example:
4412  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
4413  *      if (!rpage)
4414  *              return error;
4415  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4416  *      if (ret >= 0)
4417  *              process_page(rpage, ret);
4418  *
4419  * When @full is set, the function will not copy any data unless
4420  * the writer is off the reader page.
4421  *
4422  * Note: it is up to the calling functions to handle sleeps and wakeups.
4423  *  The ring buffer can be used anywhere in the kernel and can not
4424  *  blindly call wake_up. The layer that uses the ring buffer must be
4425  *  responsible for that.
4426  *
4427  * Returns:
4428  *  >=0 if data has been transferred, returns the offset of consumed data.
4429  *  <0 if no data has been transferred.
4430  */
4431 int ring_buffer_read_page(struct ring_buffer *buffer,
4432                           void **data_page, size_t len, int cpu, int full)
4433 {
4434         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4435         struct ring_buffer_event *event;
4436         struct buffer_data_page *bpage;
4437         struct buffer_page *reader;
4438         unsigned long missed_events;
4439         unsigned long flags;
4440         unsigned int commit;
4441         unsigned int read;
4442         u64 save_timestamp;
4443         int ret = -1;
4444
4445         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4446                 goto out;
4447
4448         /*
4449          * If len is not big enough to hold the page header, then
4450          * we can not copy anything.
4451          */
4452         if (len <= BUF_PAGE_HDR_SIZE)
4453                 goto out;
4454
4455         len -= BUF_PAGE_HDR_SIZE;
4456
4457         if (!data_page)
4458                 goto out;
4459
4460         bpage = *data_page;
4461         if (!bpage)
4462                 goto out;
4463
4464         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4465
4466         reader = rb_get_reader_page(cpu_buffer);
4467         if (!reader)
4468                 goto out_unlock;
4469
4470         event = rb_reader_event(cpu_buffer);
4471
4472         read = reader->read;
4473         commit = rb_page_commit(reader);
4474
4475         /* Check if any events were dropped */
4476         missed_events = cpu_buffer->lost_events;
4477
4478         /*
4479          * If this page has been partially read or
4480          * if len is not big enough to read the rest of the page or
4481          * a writer is still on the page, then
4482          * we must copy the data from the page to the buffer.
4483          * Otherwise, we can simply swap the page with the one passed in.
4484          */
4485         if (read || (len < (commit - read)) ||
4486             cpu_buffer->reader_page == cpu_buffer->commit_page) {
4487                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4488                 unsigned int rpos = read;
4489                 unsigned int pos = 0;
4490                 unsigned int size;
4491
4492                 if (full)
4493                         goto out_unlock;
4494
4495                 if (len > (commit - read))
4496                         len = (commit - read);
4497
4498                 /* Always keep the time extend and data together */
4499                 size = rb_event_ts_length(event);
4500
4501                 if (len < size)
4502                         goto out_unlock;
4503
4504                 /* save the current timestamp, since the user will need it */
4505                 save_timestamp = cpu_buffer->read_stamp;
4506
4507                 /* Need to copy one event at a time */
4508                 do {
4509                         /* We need the size of one event, because
4510                          * rb_advance_reader only advances by one event,
4511                          * whereas rb_event_ts_length may include the size of
4512                          * one or two events.
4513                          * We have already ensured there's enough space if this
4514                          * is a time extend. */
4515                         size = rb_event_length(event);
4516                         memcpy(bpage->data + pos, rpage->data + rpos, size);
4517
4518                         len -= size;
4519
4520                         rb_advance_reader(cpu_buffer);
4521                         rpos = reader->read;
4522                         pos += size;
4523
4524                         if (rpos >= commit)
4525                                 break;
4526
4527                         event = rb_reader_event(cpu_buffer);
4528                         /* Always keep the time extend and data together */
4529                         size = rb_event_ts_length(event);
4530                 } while (len >= size);
4531
4532                 /* update bpage */
4533                 local_set(&bpage->commit, pos);
4534                 bpage->time_stamp = save_timestamp;
4535
4536                 /* we copied everything to the beginning */
4537                 read = 0;
4538         } else {
4539                 /* update the entry counter */
4540                 cpu_buffer->read += rb_page_entries(reader);
4541                 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4542
4543                 /* swap the pages */
4544                 rb_init_page(bpage);
4545                 bpage = reader->page;
4546                 reader->page = *data_page;
4547                 local_set(&reader->write, 0);
4548                 local_set(&reader->entries, 0);
4549                 reader->read = 0;
4550                 *data_page = bpage;
4551
4552                 /*
4553                  * Use the real_end for the data size,
4554                  * This gives us a chance to store the lost events
4555                  * on the page.
4556                  */
4557                 if (reader->real_end)
4558                         local_set(&bpage->commit, reader->real_end);
4559         }
4560         ret = read;
4561
4562         cpu_buffer->lost_events = 0;
4563
4564         commit = local_read(&bpage->commit);
4565         /*
4566          * Set a flag in the commit field if we lost events
4567          */
4568         if (missed_events) {
4569                 /* If there is room at the end of the page to save the
4570                  * missed events, then record it there.
4571                  */
4572                 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4573                         memcpy(&bpage->data[commit], &missed_events,
4574                                sizeof(missed_events));
4575                         local_add(RB_MISSED_STORED, &bpage->commit);
4576                         commit += sizeof(missed_events);
4577                 }
4578                 local_add(RB_MISSED_EVENTS, &bpage->commit);
4579         }
4580
4581         /*
4582          * This page may be off to user land. Zero it out here.
4583          */
4584         if (commit < BUF_PAGE_SIZE)
4585                 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4586
4587  out_unlock:
4588         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4589
4590  out:
4591         return ret;
4592 }
4593 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
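
/*
 * A hedged usage sketch (not part of this file): how a caller might
 * drive ring_buffer_read_page() and detect the missed-events flags
 * set above.  "process_page" is a hypothetical consumer supplied by
 * the caller.
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (!rpage)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0) {
 *		struct buffer_data_page *bpage = rpage;
 *		long commit = local_read(&bpage->commit);
 *
 *		if (commit & RB_MISSED_EVENTS)
 *			pr_debug("events were lost while reading\n");
 *		process_page(bpage->data, ret);
 *	}
 *	ring_buffer_free_read_page(buffer, rpage);
 */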
4594
4595 #ifdef CONFIG_HOTPLUG_CPU
4596 static int rb_cpu_notify(struct notifier_block *self,
4597                          unsigned long action, void *hcpu)
4598 {
4599         struct ring_buffer *buffer =
4600                 container_of(self, struct ring_buffer, cpu_notify);
4601         long cpu = (long)hcpu;
4602         int cpu_i, nr_pages_same;
4603         unsigned int nr_pages;
4604
4605         switch (action) {
4606         case CPU_UP_PREPARE:
4607         case CPU_UP_PREPARE_FROZEN:
4608                 if (cpumask_test_cpu(cpu, buffer->cpumask))
4609                         return NOTIFY_OK;
4610
4611                 nr_pages = 0;
4612                 nr_pages_same = 1;
4613                 /* check whether all per-cpu buffer sizes are the same */
4614                 for_each_buffer_cpu(buffer, cpu_i) {
4615                         /* fill in the size from first enabled cpu */
4616                         if (nr_pages == 0)
4617                                 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4618                         if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4619                                 nr_pages_same = 0;
4620                                 break;
4621                         }
4622                 }
4623                 /* allocate the minimum number of pages; the user can expand it later */
4624                 if (!nr_pages_same)
4625                         nr_pages = 2;
4626                 buffer->buffers[cpu] =
4627                         rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4628                 if (!buffer->buffers[cpu]) {
4629                         WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4630                              cpu);
4631                         return NOTIFY_OK;
4632                 }
4633                 smp_wmb(); /* publish the new buffer before setting its cpumask bit */
4634                 cpumask_set_cpu(cpu, buffer->cpumask);
4635                 break;
4636         case CPU_DOWN_PREPARE:
4637         case CPU_DOWN_PREPARE_FROZEN:
4638                 /*
4639                  * Do nothing.
4640                  *  If we were to free the buffer, then the user would
4641                  *  lose any trace that was in the buffer.
4642                  */
4643                 break;
4644         default:
4645                 break;
4646         }
4647         return NOTIFY_OK;
4648 }
4649 #endif
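
/*
 * rb_cpu_notify() above is hooked up by ring_buffer_alloc() earlier
 * in this file; a minimal sketch of that registration, assuming the
 * usual notifier_block fields:
 *
 *	buffer->cpu_notify.notifier_call = rb_cpu_notify;
 *	buffer->cpu_notify.priority = 0;
 *	register_cpu_notifier(&buffer->cpu_notify);
 *
 * ring_buffer_free() performs the matching unregister_cpu_notifier().
 */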
4650
4651 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4652 /*
4653  * This is a basic integrity check of the ring buffer.
4654  * Late in the boot cycle this test will run when configured in.
4655  * It will kick off a thread per CPU that will go into a loop
4656  * writing to the per cpu ring buffer various sizes of data.
4657  * Some of the data will be large items, some small.
4658  *
4659  * Another thread is created that goes into a spin, sending out
4660  * IPIs to the other CPUs to also write into the ring buffer.
4661  * This is to test the nesting ability of the buffer.
4662  *
4663  * Basic stats are recorded and reported. If something in the
4664  * ring buffer should happen that's not expected, a big warning
4665  * is displayed and all ring buffers are disabled.
4666  */
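/*
 * To run the test, build with CONFIG_RING_BUFFER_STARTUP_TEST=y;
 * test_ringbuffer() below then runs automatically as a late_initcall()
 * during boot and reports its results via pr_info().
 */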
4667 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4668
4669 struct rb_test_data {
4670         struct ring_buffer      *buffer;
4671         unsigned long           events;
4672         unsigned long           bytes_written;
4673         unsigned long           bytes_alloc;
4674         unsigned long           bytes_dropped;
4675         unsigned long           events_nested;
4676         unsigned long           bytes_written_nested;
4677         unsigned long           bytes_alloc_nested;
4678         unsigned long           bytes_dropped_nested;
4679         int                     min_size_nested;
4680         int                     max_size_nested;
4681         int                     max_size;
4682         int                     min_size;
4683         int                     cpu;
4684         int                     cnt;
4685 };
4686
4687 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4688
4689 /* 1 meg per cpu */
4690 #define RB_TEST_BUFFER_SIZE     1048576
4691
4692 static char rb_string[] __initdata =
4693         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4694         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4695         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4696
4697 static bool rb_test_started __initdata;
4698
4699 struct rb_item {
4700         int size;
4701         char str[];
4702 };
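
/*
 * rb_item ends in a C99 flexible array member, so sizeof(struct rb_item)
 * covers only the "size" field.  An event carrying N string bytes thus
 * reserves N + sizeof(struct rb_item) bytes; a sketch mirroring
 * rb_write_something() below, for N == 10:
 *
 *	len = 10 + sizeof(struct rb_item);
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	item = ring_buffer_event_data(event);
 *	item->size = 10;
 *	memcpy(item->str, rb_string, 10);
 *	ring_buffer_unlock_commit(buffer, event);
 */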
4703
4704 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4705 {
4706         struct ring_buffer_event *event;
4707         struct rb_item *item;
4708         bool started;
4709         int event_len;
4710         int size;
4711         int len;
4712         int cnt;
4713
4714         /* Have nested writes different from what is written */
4715         cnt = data->cnt + (nested ? 27 : 0);
4716
4717         /* Multiply cnt by ~e (68/25 ~= 2.72), to make some unique increment */
4718         size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4719
4720         len = size + sizeof(struct rb_item);
4721
4722         started = rb_test_started;
4723         /* read rb_test_started before checking buffer enabled */
4724         smp_rmb();
4725
4726         event = ring_buffer_lock_reserve(data->buffer, len);
4727         if (!event) {
4728                 /* Ignore dropped events before test starts. */
4729                 if (started) {
4730                         if (nested)
4731                                 data->bytes_dropped_nested += len;
4732                         else
4733                                 data->bytes_dropped += len;
4734                 }
4735                 return len;
4736         }
4737
4738         event_len = ring_buffer_event_length(event);
4739
4740         if (RB_WARN_ON(data->buffer, event_len < len))
4741                 goto out;
4742
4743         item = ring_buffer_event_data(event);
4744         item->size = size;
4745         memcpy(item->str, rb_string, size);
4746
4747         if (nested) {
4748                 data->bytes_alloc_nested += event_len;
4749                 data->bytes_written_nested += len;
4750                 data->events_nested++;
4751                 if (!data->min_size_nested || len < data->min_size_nested)
4752                         data->min_size_nested = len;
4753                 if (len > data->max_size_nested)
4754                         data->max_size_nested = len;
4755         } else {
4756                 data->bytes_alloc += event_len;
4757                 data->bytes_written += len;
4758                 data->events++;
4759                 if (!data->min_size || len < data->min_size)
4760                         data->min_size = len;
4761                 if (len > data->max_size)
4762                         data->max_size = len;
4763         }
4764
4765  out:
4766         ring_buffer_unlock_commit(data->buffer, event);
4767
4768         return 0;
4769 }
4770
4771 static __init int rb_test(void *arg)
4772 {
4773         struct rb_test_data *data = arg;
4774
4775         while (!kthread_should_stop()) {
4776                 rb_write_something(data, false);
4777                 data->cnt++;
4778
4779                 set_current_state(TASK_INTERRUPTIBLE);
4780                 /* Now sleep for a min of 100, 200 or 300us and a max of 1ms */
4781                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4782         }
4783
4784         return 0;
4785 }
4786
4787 static __init void rb_ipi(void *ignore)
4788 {
4789         struct rb_test_data *data;
4790         int cpu = smp_processor_id();
4791
4792         data = &rb_data[cpu];
4793         rb_write_something(data, true);
4794 }
4795
4796 static __init int rb_hammer_test(void *arg)
4797 {
4798         while (!kthread_should_stop()) {
4799
4800                 /* Send an IPI to all cpus to write data! */
4801                 smp_call_function(rb_ipi, NULL, 1);
4802                 /* No sleep, but on non-preempt kernels, let others run */
4803                 schedule();
4804         }
4805
4806         return 0;
4807 }
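
/*
 * smp_call_function(rb_ipi, NULL, 1) runs rb_ipi() in IPI (interrupt)
 * context on every other online CPU and, with the final argument set
 * to 1, waits for them all to finish.  Since an interrupted CPU may
 * itself be inside rb_write_something() from rb_test(), the IPI write
 * nests inside a normal write, which is exactly the case the "nested"
 * accounting in rb_write_something() exercises.
 */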
4808
4809 static __init int test_ringbuffer(void)
4810 {
4811         struct task_struct *rb_hammer;
4812         struct ring_buffer *buffer;
4813         int cpu;
4814         int ret = 0;
4815
4816         pr_info("Running ring buffer tests...\n");
4817
4818         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4819         if (WARN_ON(!buffer))
4820                 return 0;
4821
4822         /* Disable buffer so that threads can't write to it yet */
4823         ring_buffer_record_off(buffer);
4824
4825         for_each_online_cpu(cpu) {
4826                 rb_data[cpu].buffer = buffer;
4827                 rb_data[cpu].cpu = cpu;
4828                 rb_data[cpu].cnt = cpu;
4829                 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4830                                                  "rbtester/%d", cpu);
4831                 if (WARN_ON(!rb_threads[cpu])) {
4832                         pr_cont("FAILED\n");
4833                         ret = -1;
4834                         goto out_free;
4835                 }
4836
4837                 kthread_bind(rb_threads[cpu], cpu);
4838                 wake_up_process(rb_threads[cpu]);
4839         }
4840
4841         /* Now create the rb hammer! */
4842         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4843         if (WARN_ON(!rb_hammer)) {
4844                 pr_cont("FAILED\n");
4845                 ret = -1;
4846                 goto out_free;
4847         }
4848
4849         ring_buffer_record_on(buffer);
4850         /*
4851          * Show buffer is enabled before setting rb_test_started.
4852          * Yes there's a small race window where events could be
4853          * dropped and the thread won't catch it. But when a ring
4854          * buffer gets enabled, there will always be some kind of
4855          * delay before other CPUs see it. Thus, we don't care about
4856          * those dropped events. We care about events dropped after
4857          * the threads see that the buffer is active.
4858          */
4859         smp_wmb(); /* pairs with the smp_rmb() in rb_write_something() */
4860         rb_test_started = true;
4861
4862         set_current_state(TASK_INTERRUPTIBLE);
4863         /* Just run for 10 seconds */
4864         schedule_timeout(10 * HZ);
4865
4866         kthread_stop(rb_hammer);
4867
4868  out_free:
4869         for_each_online_cpu(cpu) {
4870                 if (!rb_threads[cpu])
4871                         break;
4872                 kthread_stop(rb_threads[cpu]);
4873         }
4874         if (ret) {
4875                 ring_buffer_free(buffer);
4876                 return ret;
4877         }
4878
4879         /* Report! */
4880         pr_info("finished\n");
4881         for_each_online_cpu(cpu) {
4882                 struct ring_buffer_event *event;
4883                 struct rb_test_data *data = &rb_data[cpu];
4884                 struct rb_item *item;
4885                 unsigned long total_events;
4886                 unsigned long total_dropped;
4887                 unsigned long total_written;
4888                 unsigned long total_alloc;
4889                 unsigned long total_read = 0;
4890                 unsigned long total_size = 0;
4891                 unsigned long total_len = 0;
4892                 unsigned long total_lost = 0;
4893                 unsigned long lost;
4894                 int big_event_size;
4895                 int small_event_size;
4896
4897                 ret = -1;
4898
4899                 total_events = data->events + data->events_nested;
4900                 total_written = data->bytes_written + data->bytes_written_nested;
4901                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4902                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4903
4904                 big_event_size = data->max_size + data->max_size_nested;
4905                 small_event_size = data->min_size + data->min_size_nested;
4906
4907                 pr_info("CPU %d:\n", cpu);
4908                 pr_info("              events:    %ld\n", total_events);
4909                 pr_info("       dropped bytes:    %ld\n", total_dropped);
4910                 pr_info("       alloced bytes:    %ld\n", total_alloc);
4911                 pr_info("       written bytes:    %ld\n", total_written);
4912                 pr_info("       biggest event:    %d\n", big_event_size);
4913                 pr_info("      smallest event:    %d\n", small_event_size);
4914
4915                 if (RB_WARN_ON(buffer, total_dropped))
4916                         break;
4917
4918                 ret = 0;
4919
4920                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4921                         total_lost += lost;
4922                         item = ring_buffer_event_data(event);
4923                         total_len += ring_buffer_event_length(event);
4924                         total_size += item->size + sizeof(struct rb_item);
4925                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4926                                 pr_info("FAILED!\n");
4927                                 pr_info("buffer had: %.*s\n", item->size, item->str);
4928                                 pr_info("expected:   %.*s\n", item->size, rb_string);
4929                                 RB_WARN_ON(buffer, 1);
4930                                 ret = -1;
4931                                 break;
4932                         }
4933                         total_read++;
4934                 }
4935                 if (ret)
4936                         break;
4937
4938                 ret = -1;
4939
4940                 pr_info("         read events:   %ld\n", total_read);
4941                 pr_info("         lost events:   %ld\n", total_lost);
4942                 pr_info("        total events:   %ld\n", total_lost + total_read);
4943                 pr_info("  recorded len bytes:   %ld\n", total_len);
4944                 pr_info(" recorded size bytes:   %ld\n", total_size);
4945                 if (total_lost)
4946                         pr_info(" With dropped events, recorded len and size may not match\n"
4947                                 " the alloced and written totals above\n");
4948                 if (!total_lost) {
4949                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
4950                                        total_size != total_written))
4951                                 break;
4952                 }
4953                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4954                         break;
4955
4956                 ret = 0;
4957         }
4958         if (!ret)
4959                 pr_info("Ring buffer PASSED!\n");
4960
4961         ring_buffer_free(buffer);
4962         return 0;
4963 }
4964
4965 late_initcall(test_ringbuffer);
4966 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */