nfs: page group support in nfs_mark_uptodate
author Weston Andros Adamson <dros@primarydata.com>
Thu, 15 May 2014 15:56:48 +0000 (11:56 -0400)
committer Trond Myklebust <trond.myklebust@primarydata.com>
Thu, 29 May 2014 15:11:46 +0000 (11:11 -0400)
Change how nfs_mark_uptodate determines whether writes cover a whole page.

This patch should have no effect yet, since all page groups currently
hold a single request, but it will come into play once pg_test functions
are modified to split pages into sub-page regions.
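
For illustration, here is a minimal userspace sketch of the new coverage
walk. It is only a sketch: struct req, search() and covers_page() are
simplified stand-ins for struct nfs_page, nfs_page_group_search_locked()
and nfs_page_group_covers_page(), and locking is omitted. The two-request
layout is a hypothetical example of a post-split page group.

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-in for struct nfs_page */
struct req {
	unsigned int wb_pgbase;    /* offset of this request within the page */
	unsigned int wb_bytes;     /* number of bytes in this request */
	struct req *wb_this_page;  /* next request in the circular page group */
};

/* mirrors nfs_page_group_search_locked: find the request containing off */
static struct req *search(struct req *head, unsigned int off)
{
	struct req *r = head;

	do {
		if (off >= r->wb_pgbase && off < r->wb_pgbase + r->wb_bytes)
			return r;
		r = r->wb_this_page;
	} while (r != head);

	return NULL;
}

/* mirrors nfs_page_group_covers_page: advance pos through contiguous
 * requests until the whole page length is accounted for */
static bool covers_page(struct req *head, unsigned int page_len)
{
	struct req *tmp;
	unsigned int pos = 0;

	do {
		tmp = search(head, pos);
		if (tmp)
			pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
	} while (tmp && pos < page_len);

	return pos == page_len;
}

int main(void)
{
	/* two sub-page requests that together cover a 4096-byte page */
	struct req b = { 2048, 2048, NULL };
	struct req a = { 0, 2048, &b };

	b.wb_this_page = &a;                    /* close the circular list */
	printf("%d\n", covers_page(&a, 4096));  /* 1: [0,2048)+[2048,4096) */

	/* move the second request, leaving a gap at [2048, 3000) */
	b.wb_pgbase = 3000;
	b.wb_bytes = 1096;
	printf("%d\n", covers_page(&a, 4096));  /* 0: page is not covered */

	return 0;
}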

Signed-off-by: Weston Andros Adamson <dros@primarydata.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
fs/nfs/write.c

index 5d752766139daad90cc5ac1c009795753a1e6317..17b98952f7bdda895434844c0c93b39583be358f 100644
@@ -154,18 +154,78 @@ static void nfs_set_pageerror(struct page *page)
        nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
 }
 
+/*
+ * nfs_page_group_search_locked
+ * @head - head request of page group
+ * @page_offset - offset into page
+ *
+ * Search page group with head @head to find a request that contains the
+ * page offset @page_offset.
+ *
+ * Returns a pointer to the first matching nfs request, or NULL if no
+ * match is found.
+ *
+ * Must be called with the page group lock held.
+ */
+static struct nfs_page *
+nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
+{
+       struct nfs_page *req;
+
+       WARN_ON_ONCE(head != head->wb_head);
+       WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags));
+
+       req = head;
+       do {
+               if (page_offset >= req->wb_pgbase &&
+                   page_offset < (req->wb_pgbase + req->wb_bytes))
+                       return req;
+
+               req = req->wb_this_page;
+       } while (req != head);
+
+       return NULL;
+}
+
+/*
+ * nfs_page_group_covers_page
+ * @req - request in a page group
+ *
+ * Return true if the page group containing @req covers the whole page,
+ * false otherwise.
+ */
+static bool nfs_page_group_covers_page(struct nfs_page *req)
+{
+       struct nfs_page *tmp;
+       unsigned int pos = 0;
+       unsigned int len = nfs_page_length(req->wb_page);
+
+       nfs_page_group_lock(req);
+
+       do {
+               tmp = nfs_page_group_search_locked(req->wb_head, pos);
+               if (tmp) {
+                       /* requests in a group should be contiguous; a gap or overlap here is a bug */
+                       WARN_ON_ONCE(tmp->wb_pgbase != pos);
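+                       /* equivalent to pos = tmp->wb_pgbase + tmp->wb_bytes */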
+                       pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
+               }
+       } while (tmp && pos < len);
+
+       nfs_page_group_unlock(req);
+       WARN_ON_ONCE(pos > len);
+       return pos == len;
+}
+
 /* We can set the PG_uptodate flag if we see that the requests in a
  * page group cover the full page.
  */
-static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
+static void nfs_mark_uptodate(struct nfs_page *req)
 {
-       if (PageUptodate(page))
-               return;
-       if (base != 0)
+       if (PageUptodate(req->wb_page))
                return;
-       if (count != nfs_page_length(page))
+       if (!nfs_page_group_covers_page(req))
                return;
-       SetPageUptodate(page);
+       SetPageUptodate(req->wb_page);
 }
 
 static int wb_priority(struct writeback_control *wbc)
@@ -796,7 +856,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
                return PTR_ERR(req);
        /* Update file length */
        nfs_grow_file(page, offset, count);
-       nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+       nfs_mark_uptodate(req);
        nfs_mark_request_dirty(req);
        nfs_unlock_and_release_request(req);
        return 0;
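
The call-site change above is the visible half of the interface shift: the
caller no longer spells out a single write's extent, because the request
itself carries its page and, via wb_this_page, the rest of its group.

	/* before: uptodate was judged from one request's extent */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);

	/* after: uptodate is judged across the whole page group */
	nfs_mark_uptodate(req);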