Page cache miss tracing using ftrace on mm/filemap
authorDaniel Campello <campello@google.com>
Mon, 20 Jul 2015 17:37:56 +0000 (10:37 -0700)
committerHuang, Tao <huangtao@rock-chips.com>
Tue, 27 Oct 2015 08:35:08 +0000 (16:35 +0800)
This patch adds two trace events, in generic_perform_write() and
do_generic_file_read(), that probe the address_space mapping to report
whether the pages covered by the request are already present in the
page cache (i.e. whether the request hits or misses the cache).

Change-Id: Ib319b9b2c971b9e5c76645be6cfd995ef9465d77
Signed-off-by: Daniel Campello <campello@google.com>
(cherry picked from commit d3952c50853166bd04562766c9603ed86ab0da75)

Conflicts:
include/linux/pagemap.h

include/linux/pagemap.h
include/trace/events/filemap.h
mm/filemap.c

index e3dea75a078ba67b12408abd743633e3ea0a3995..ee1390e2db8081c49adfbdcfce3e57652e38f4b1 100644 (file)
@@ -243,6 +243,9 @@ static inline struct page *page_cache_alloc_readahead(struct address_space *x)
 
 typedef int filler_t(void *, struct page *);
 
+pgoff_t page_cache_next_hole(struct address_space *mapping,
+                             pgoff_t index, unsigned long max_scan);
+
 extern struct page * find_get_page(struct address_space *mapping,
                                pgoff_t index);
 extern struct page * find_lock_page(struct address_space *mapping,
@@ -391,7 +394,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
        return 0;
 }
 
-/* 
+/*
  * Wait for a page to be unlocked.
  *
  * This must be called with the caller "holding" the page,
@@ -404,7 +407,7 @@ static inline void wait_on_page_locked(struct page *page)
                wait_on_page_bit(page, PG_locked);
 }
 
-/* 
+/*
  * Wait for a page to complete writeback
  */
 static inline void wait_on_page_writeback(struct page *page)
index 0421f49a20f7a2af50ca0ab7e7280455d1871367..2489b79d1b919c62acb78a0bca6d969fc8ec24d3 100644 (file)
@@ -52,6 +52,52 @@ DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
        TP_ARGS(page)
        );
 
+/*
+ * Common event class for page-cache hit/miss probes.
+ *
+ * @file:  file being read or written
+ * @pos:   byte offset of the request
+ * @count: byte length of the request
+ * @read:  1 for the read path, 0 for the write path
+ *
+ * "miss" is 1 when at least one page in [pos, pos+count) is absent from
+ * the page cache.  Whole-page-aligned writes never need a read-modify-
+ * write, so they are only probed when partial (unaligned pos or count);
+ * reads are always probed.
+ */
+DECLARE_EVENT_CLASS(mm_filemap_find_page_cache_miss,
+
+	TP_PROTO(struct file *file, loff_t pos, size_t count, int read),
+
+	TP_ARGS(file, pos, count, read),
+
+	TP_STRUCT__entry(
+		__array(char, path, MAX_FILTER_STR_VAL)
+		__field(char *, path_name)
+		__field(loff_t, pos)
+		__field(size_t, count)
+		__field(int, miss)
+	),
+
+	TP_fast_assign(
+		{
+			char *p = d_path(&file->f_path, __entry->path,
+					 MAX_FILTER_STR_VAL);
+			/*
+			 * d_path() returns ERR_PTR() on failure (e.g. the
+			 * path does not fit); never let TP_printk's %s
+			 * dereference an error pointer.
+			 */
+			__entry->path_name = IS_ERR(p) ? "(unknown)" : p;
+		}
+		__entry->pos	= pos;
+		__entry->count	= count;
+		__entry->miss = 0;
+		/*
+		 * count == 0 is skipped entirely: there is nothing to look
+		 * up, and pos + count - 1 below would underflow.
+		 */
+		if (count && ((pos & ~PAGE_CACHE_MASK) ||
+			      (count % PAGE_CACHE_SIZE) || read)) {
+			pgoff_t first = pos >> PAGE_CACHE_SHIFT;
+			pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+			pgoff_t hole;
+
+			rcu_read_lock();
+			hole = page_cache_next_hole(file->f_mapping, first,
+						    last - first + 1);
+			rcu_read_unlock();
+			/* A hole inside [first, last] means a cache miss. */
+			__entry->miss = (hole >= first && hole <= last);
+		}
+	),
+
+	TP_printk("path_name %s pos %lld count %zu miss %s",
+		  __entry->path_name,
+		  __entry->pos, __entry->count,
+		  (__entry->miss ? "yes" : "no"))
+);
+
+/* Fired at entry to do_generic_file_read(); read path, so read == 1. */
+DEFINE_EVENT(mm_filemap_find_page_cache_miss, mm_filemap_do_generic_file_read,
+	TP_PROTO(struct file *file, loff_t pos, size_t count, int read),
+	TP_ARGS(file, pos, count, read)
+	);
+
+/* Fired at entry to generic_perform_write(); write path, so read == 0. */
+DEFINE_EVENT(mm_filemap_find_page_cache_miss, mm_filemap_generic_perform_write,
+	TP_PROTO(struct file *file, loff_t pos, size_t count, int read),
+	TP_ARGS(file, pos, count, read)
+	);
+
 #endif /* _TRACE_FILEMAP_H */
 
 /* This part must be outside protection */
index 7905fe721aa8ab3db06c957c9f2cc63cea1fee5f..fa1f12a3589648f36a4aa541de4e19a57495a894 100644 (file)
@@ -683,6 +683,47 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
        }
 }
 
+/**
+ * page_cache_next_hole - find the next hole (not-present entry)
+ * @mapping: mapping
+ * @index: index
+ * @max_scan: maximum range to search
+ *
+ * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
+ * lowest indexed hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'return - index >=
+ * max_scan' will be true). In rare cases of index wrap-around, 0 will
+ * be returned.
+ *
+ * page_cache_next_hole may be called under rcu_read_lock. However,
+ * like radix_tree_gang_lookup, this will not atomically search a
+ * snapshot of the tree at a single point in time. For example, if a
+ * hole is created at index 5, then subsequently a hole is created at
+ * index 10, page_cache_next_hole covering both indexes may return 10
+ * if called under rcu_read_lock.
+ */
+pgoff_t page_cache_next_hole(struct address_space *mapping,
+			     pgoff_t index, unsigned long max_scan)
+{
+	unsigned long i;
+
+	for (i = 0; i < max_scan; i++) {
+		struct page *page;
+
+		page = radix_tree_lookup(&mapping->page_tree, index);
+		/* An absent slot or an exceptional (shadow) entry is a hole. */
+		if (!page || radix_tree_exceptional_entry(page))
+			break;
+		index++;
+		/* pgoff_t wrapped around; report 0 as documented above. */
+		if (index == 0)
+			break;
+	}
+
+	return index;
+}
+EXPORT_SYMBOL(page_cache_next_hole);
+
 /**
  * find_get_page - find and get a page reference
  * @mapping: the address_space to search
@@ -1109,6 +1150,8 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
        unsigned int prev_offset;
        int error;
 
+       trace_mm_filemap_do_generic_file_read(filp, *ppos, desc->count, 1);
+
        index = *ppos >> PAGE_CACHE_SHIFT;
        prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
        prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
@@ -1503,7 +1546,7 @@ EXPORT_SYMBOL(generic_file_aio_read);
 static int page_cache_read(struct file *file, pgoff_t offset)
 {
        struct address_space *mapping = file->f_mapping;
-       struct page *page; 
+       struct page *page;
        int ret;
 
        do {
@@ -1520,7 +1563,7 @@ static int page_cache_read(struct file *file, pgoff_t offset)
                page_cache_release(page);
 
        } while (ret == AOP_TRUNCATED_PAGE);
-               
+
        return ret;
 }
 
@@ -2307,6 +2350,8 @@ static ssize_t generic_perform_write(struct file *file,
        ssize_t written = 0;
        unsigned int flags = 0;
 
+       trace_mm_filemap_generic_perform_write(file, pos, iov_iter_count(i), 0);
+
        /*
         * Copies from kernel address space cannot fail (NFSD is a big user).
         */
@@ -2405,7 +2450,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                written += status;
                *ppos = pos + status;
        }
-       
+
        return written ? written : status;
 }
 EXPORT_SYMBOL(generic_file_buffered_write);