Merge branch 'v4.4/topic/OPPv2' into linux-linaro-lsk-v4.4
[firefly-linux-kernel-4.4.55.git] kernel/trace/ring_buffer.c
index 75f1d05ea82dcce04e9f6a581f26969e503dc773..acbb0e73d3a215701574b0c67fce832c742d825e 100644
@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
-       unsigned int                    nr_pages;
+       unsigned long                   nr_pages;
        unsigned int                    current_context;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
        u64                             write_stamp;
        u64                             read_stamp;
        /* ring buffer pages to update, > 0 to add, < 0 to remove */
-       int                             nr_pages_to_update;
+       long                            nr_pages_to_update;
        struct list_head                new_pages; /* new pages to add */
        struct work_struct              update_pages_work;
        struct completion               update_done;
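
Note: the two hunks above widen the per-CPU page counters. nr_pages is
derived from a user-controlled unsigned long size, so an int field can
silently truncate very large requests; nr_pages_to_update stays signed
because it is a delta (> 0 to add pages, < 0 to remove). A minimal
standalone sketch of the truncation, assuming 64-bit long and using 4080
as a stand-in for BUF_PAGE_SIZE (PAGE_SIZE minus the page header):

#include <stdio.h>
#include <limits.h>

#define BUF_PAGE_SIZE 4080UL    /* stand-in value, see note above */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long size = 1UL << 45;    /* 32 TB request, 64-bit long */
        unsigned long pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        int truncated = (int)pages;        /* the old field type */

        printf("pages   = %lu\n", pages);      /* ~8.6 billion */
        printf("as int  = %d\n", truncated);   /* truncated garbage */
        printf("INT_MAX = %d\n", INT_MAX);
        return 0;
}
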
@@ -1137,10 +1137,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
        return 0;
 }
 
-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 {
-       int i;
        struct buffer_page *bpage, *tmp;
+       long i;
 
        for (i = 0; i < nr_pages; i++) {
                struct page *page;
@@ -1177,7 +1177,7 @@ free_pages:
 }
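
Note: the index in __rb_allocate_pages() is widened along with the
bound; an int index against a long nr_pages could overflow before the
loop terminates. A scaled-down sketch of the hazard, using int8_t
against an int bound as a stand-in (a genuine int index running past
INT_MAX would be undefined behaviour rather than this visible wrap):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int nr_pages = 200;     /* bound is wider than the index */
        int iterations = 0;

        for (int8_t i = 0; i < nr_pages; i++) {  /* i wraps after 127 */
                if (++iterations > 1000)
                        break;  /* would otherwise never terminate */
        }
        printf("iterations = %d (expected %d)\n", iterations, nr_pages);
        return 0;
}
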
 
 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
-                            unsigned nr_pages)
+                            unsigned long nr_pages)
 {
        LIST_HEAD(pages);
 
@@ -1202,7 +1202,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
@@ -1302,8 +1302,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
                                        struct lock_class_key *key)
 {
        struct ring_buffer *buffer;
+       long nr_pages;
        int bsize;
-       int cpu, nr_pages;
+       int cpu;
 
        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1429,12 +1430,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
 }
 
 static int
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 {
        struct list_head *tail_page, *to_remove, *next_page;
        struct buffer_page *to_remove_page, *tmp_iter_page;
        struct buffer_page *last_page, *first_page;
-       unsigned int nr_removed;
+       unsigned long nr_removed;
        unsigned long head_bit;
        int page_entries;
 
@@ -1651,7 +1652,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
                        int cpu_id)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       unsigned nr_pages;
+       unsigned long nr_pages;
        int cpu, err = 0;
 
        /*
@@ -1665,14 +1666,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
            !cpumask_test_cpu(cpu_id, buffer->cpumask))
                return size;
 
-       size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
-       size *= BUF_PAGE_SIZE;
+       nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
        /* we need a minimum of two pages */
-       if (size < BUF_PAGE_SIZE * 2)
-               size = BUF_PAGE_SIZE * 2;
+       if (nr_pages < 2)
+               nr_pages = 2;
 
-       nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+       size = nr_pages * BUF_PAGE_SIZE;
 
        /*
         * Don't succeed if resizing is disabled, as a reader might be
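
Note: the resize hunk above reworks the size normalization to operate
in page units: compute nr_pages once, clamp to the two-page minimum
directly, then derive the byte size from the page count, instead of
rounding bytes up, clamping in bytes, and dividing again. A standalone
sketch of the new order, with 4080 standing in for BUF_PAGE_SIZE:

#include <stdio.h>

#define BUF_PAGE_SIZE 4080UL    /* stand-in value */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long size = 5000;    /* requested bytes */

        unsigned long nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        if (nr_pages < 2)             /* ring buffer needs two pages */
                nr_pages = 2;
        size = nr_pages * BUF_PAGE_SIZE;

        /* prints: nr_pages = 2, size = 8160 -- the same result the old
         * bytes-first code produced, with one DIV_ROUND_UP instead of two */
        printf("nr_pages = %lu, size = %lu\n", nr_pages, size);
        return 0;
}
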
@@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event)
        return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-{
-       cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
-       cpu_buffer->reader_page->read = 0;
-}
-
 static void rb_inc_iter(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
        event = __rb_reserve_next(cpu_buffer, &info);
 
-       if (unlikely(PTR_ERR(event) == -EAGAIN))
+       if (unlikely(PTR_ERR(event) == -EAGAIN)) {
+               if (info.add_timestamp)
+                       info.length -= RB_LEN_TIME_EXTEND;
                goto again;
+       }
 
        if (!event)
                goto out_fail;
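
Note: earlier in rb_reserve_next_event() (outside this hunk),
rb_handle_timestamp() bumps info.length by RB_LEN_TIME_EXTEND when a
time-extend event has to be injected. The -EAGAIN path jumps back to
again: and re-runs that decision, so each retry must first give those
bytes back or info.length grows by RB_LEN_TIME_EXTEND on every attempt.
A simplified standalone model (the struct and retry trigger below are
stand-ins, not the kernel's types):

#include <stdio.h>

#define RB_LEN_TIME_EXTEND 8

struct info { unsigned long length; int add_timestamp; };

int main(void)
{
        struct info info = { .length = 100 };
        int attempts = 0;

again:
        info.add_timestamp = 1;               /* pretend an extend is needed */
        info.length += RB_LEN_TIME_EXTEND;    /* what rb_handle_timestamp() does */

        if (++attempts < 3) {                 /* pretend -EAGAIN twice */
                /* the fix: give back what this attempt added */
                if (info.add_timestamp)
                        info.length -= RB_LEN_TIME_EXTEND;
                goto again;
        }

        /* prints 108; without the subtraction the retries leave it at 124 */
        printf("length = %lu\n", info.length);
        return 0;
}
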
@@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 
        /* Finally update the reader page to the new head */
        cpu_buffer->reader_page = reader;
-       rb_reset_reader_page(cpu_buffer);
+       cpu_buffer->reader_page->read = 0;
 
        if (overwrite != cpu_buffer->last_overrun) {
                cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        goto again;
 
  out:
+       /* Update the read_stamp on the first event */
+       if (reader && reader->read == 0)
+               cpu_buffer->read_stamp = reader->page->time_stamp;
+
        arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
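
Note: together with the removal of rb_reset_reader_page() above, this
moves the read_stamp update from swap time to the out: label. A buffer
page's time_stamp is only written on the first commit to that page, so
stamping read_stamp while the page may still be empty can capture a
stale value; deferring it until the page is handed out with an unread
event (reader && reader->read == 0) uses the real first-event time.
This appears to correspond to the upstream fix "ring-buffer: Update
read stamp with first real commit on page". A toy model of the ordering
(types and sequence are illustrative stand-ins only):

#include <stdio.h>

struct buffer_page { unsigned long long time_stamp; unsigned read; };

int main(void)
{
        struct buffer_page reader = { .time_stamp = 0, .read = 0 };
        unsigned long long read_stamp;

        /* old scheme: stamp at swap time, before any event is committed */
        read_stamp = reader.time_stamp;        /* 0: stale */

        reader.time_stamp = 1000;              /* first commit sets it */

        /* new scheme: stamp at out:, once the page holds an unread event */
        if (reader.read == 0)
                read_stamp = reader.time_stamp;  /* 1000: correct */

        printf("read_stamp = %llu\n", read_stamp);
        return 0;
}
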
@@ -4644,8 +4645,9 @@ static int rb_cpu_notify(struct notifier_block *self,
        struct ring_buffer *buffer =
                container_of(self, struct ring_buffer, cpu_notify);
        long cpu = (long)hcpu;
-       int cpu_i, nr_pages_same;
-       unsigned int nr_pages;
+       long nr_pages_same;
+       int cpu_i;
+       unsigned long nr_pages;
 
        switch (action) {
        case CPU_UP_PREPARE: