btrfs: drop gfp parameter from find_extent_buffer
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b5b92824a27137980287ad2418c565b425dd1bfa..ad0f0a95ad3ac6dccc54ba80da832208c9b1234e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -101,7 +101,7 @@ void extent_io_exit(void)
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
-                         struct address_space *mapping, gfp_t mask)
+                        struct address_space *mapping)
 {
        tree->state = RB_ROOT;
        INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
@@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state,
        }
 }
 
+static void uncache_state(struct extent_state **cached_ptr)
+{
+       if (cached_ptr && (*cached_ptr)) {
+               struct extent_state *state = *cached_ptr;
+               *cached_ptr = NULL;
+               free_extent_state(state);
+       }
+}
+
 /*
  * set some bits on a range in the tree.  This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
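The new uncache_state() is the release half of the cache_state() pairing: cache_state() stores a referenced pointer to an extent_state so a later caller can skip the rbtree search, and uncache_state() drops that reference and clears the pointer. A minimal sketch of the intended usage (the caller here is illustrative, not part of this diff):

        struct extent_state *cached = NULL;

        cache_state(state, &cached);    /* takes a reference on state */
        /* ... reuse cached instead of searching the tree again ... */
        uncache_state(&cached);         /* drops the ref, sets cached to NULL */
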
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-                       gfp_t mask)
+                       struct extent_state **cached_state, gfp_t mask)
 {
-       return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
-                             NULL, mask);
+       return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+                             NULL, cached_state, mask);
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
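set_extent_uptodate() now threads an optional cached_state pointer through to set_extent_bit(), so a caller that is about to unlock the same range can hand the freshly cached state to unlock_extent_cached() instead of paying for a second tree search. Passing NULL keeps the old behavior, as set_extent_buffer_uptodate() does further down:

        set_extent_uptodate(tree, start, end, NULL, GFP_NOFS);     /* as before */
        set_extent_uptodate(tree, start, end, &cached, GFP_NOFS);  /* also fills the cache */
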
@@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                                mask);
 }
 
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-                 gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 {
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
                                mask);
@@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 
        do {
                struct page *page = bvec->bv_page;
+               struct extent_state *cached = NULL;
+               struct extent_state *state;
+
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                if (++bvec <= bvec_end)
                        prefetchw(&bvec->bv_page->flags);
 
+               spin_lock(&tree->lock);
+               state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
+               if (state && state->start == start) {
+                       /*
+                        * take a reference on the state, unlock will drop
+                        * the ref
+                        */
+                       cache_state(state, &cached);
+               }
+               spin_unlock(&tree->lock);
+
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
-                                                             NULL);
+                                                             state);
                        if (ret)
                                uptodate = 0;
                }
@@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
                                        uptodate = 0;
+                               uncache_state(&cached);
                                continue;
                        }
                }
 
                if (uptodate) {
-                       set_extent_uptodate(tree, start, end,
+                       set_extent_uptodate(tree, start, end, &cached,
                                            GFP_ATOMIC);
                }
-               unlock_extent(tree, start, end, GFP_ATOMIC);
+               unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
                if (whole_page) {
                        if (uptodate) {
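Taken together, the read-completion path now resolves the locked extent_state once under tree->lock, passes it to readpage_end_io_hook(), and reuses the cached reference for both set_extent_uptodate() and unlock_extent_cached(); the resubmit branch drops the reference with uncache_state() before continuing. end_bio_extent_preparewrite() below applies the same pattern to its uptodate/unlock pair.
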
@@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
 
        do {
                struct page *page = bvec->bv_page;
+               struct extent_state *cached = NULL;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
                        prefetchw(&bvec->bv_page->flags);
 
                if (uptodate) {
-                       set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+                       set_extent_uptodate(tree, start, end, &cached,
+                                           GFP_ATOMIC);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
 
-               unlock_extent(tree, start, end, GFP_ATOMIC);
+               unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
        } while (bvec >= bio->bi_io_vec);
 
@@ -1982,7 +2007,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        struct btrfs_ordered_extent *ordered;
        int ret;
        int nr = 0;
-       size_t page_offset = 0;
+       size_t pg_offset = 0;
        size_t iosize;
        size_t disk_io_size;
        size_t blocksize = inode->i_sb->s_blocksize;
@@ -2016,19 +2041,22 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        while (cur <= end) {
                if (cur >= last_byte) {
                        char *userpage;
-                       iosize = PAGE_CACHE_SIZE - page_offset;
+                       struct extent_state *cached = NULL;
+
+                       iosize = PAGE_CACHE_SIZE - pg_offset;
                        userpage = kmap_atomic(page, KM_USER0);
-                       memset(userpage + page_offset, 0, iosize);
+                       memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
-                                           GFP_NOFS);
-                       unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+                                           &cached, GFP_NOFS);
+                       unlock_extent_cached(tree, cur, cur + iosize - 1,
+                                            &cached, GFP_NOFS);
                        break;
                }
-               em = get_extent(inode, page, page_offset, cur,
+               em = get_extent(inode, page, pg_offset, cur,
                                end - cur + 1, 0);
-               if (IS_ERR(em) || !em) {
+               if (IS_ERR_OR_NULL(em)) {
                        SetPageError(page);
                        unlock_extent(tree, cur, end, GFP_NOFS);
                        break;
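The open-coded `IS_ERR(em) || !em` checks here and in the hunks below are converted to IS_ERR_OR_NULL(). That helper lives in include/linux/err.h and folds both tests into one; its definition is roughly:

        static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
        {
                return !ptr || IS_ERR_VALUE((unsigned long)ptr);
        }
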
@@ -2063,16 +2091,19 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                /* we've found a hole, just zero and go on */
                if (block_start == EXTENT_MAP_HOLE) {
                        char *userpage;
+                       struct extent_state *cached = NULL;
+
                        userpage = kmap_atomic(page, KM_USER0);
-                       memset(userpage + page_offset, 0, iosize);
+                       memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
 
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
-                                           GFP_NOFS);
-                       unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+                                           &cached, GFP_NOFS);
+                       unlock_extent_cached(tree, cur, cur + iosize - 1,
+                                            &cached, GFP_NOFS);
                        cur = cur + iosize;
-                       page_offset += iosize;
+                       pg_offset += iosize;
                        continue;
                }
                /* the get_extent function already copied into the page */
@@ -2081,7 +2112,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        check_page_uptodate(tree, page);
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
-                       page_offset += iosize;
+                       pg_offset += iosize;
                        continue;
                }
                /* we have an inline extent but it didn't get marked up
@@ -2091,7 +2122,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        SetPageError(page);
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
-                       page_offset += iosize;
+                       pg_offset += iosize;
                        continue;
                }
 
@@ -2104,7 +2135,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
                        pnr -= page->index;
                        ret = submit_extent_page(READ, tree, page,
-                                        sector, disk_io_size, page_offset,
+                                        sector, disk_io_size, pg_offset,
                                         bdev, bio, pnr,
                                         end_bio_extent_readpage, mirror_num,
                                         *bio_flags,
@@ -2115,7 +2146,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                if (ret)
                        SetPageError(page);
                cur = cur + iosize;
-               page_offset += iosize;
+               pg_offset += iosize;
        }
        if (!nr) {
                if (!PageError(page))
@@ -2192,6 +2223,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        else
                write_flags = WRITE;
 
+       trace___extent_writepage(page, inode, wbc);
+
        WARN_ON(!PageLocked(page));
        pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
        if (page->index > end_index ||
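The trace___extent_writepage() call assumes the btrfs tracepoint support is present (a TRACE_EVENT(__extent_writepage, ...) definition in include/trace/events/btrfs.h); when the tracepoint is not enabled the call compiles down to a no-op.
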
@@ -2308,7 +2341,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                }
                em = epd->get_extent(inode, page, pg_offset, cur,
                                     end - cur + 1, 1);
-               if (IS_ERR(em) || !em) {
+               if (IS_ERR_OR_NULL(em)) {
                        SetPageError(page);
                        break;
                }
@@ -2648,7 +2681,7 @@ int extent_readpages(struct extent_io_tree *tree,
                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+                                       page->index, GFP_NOFS)) {
                        __extent_read_full_page(tree, page, get_extent,
                                                &bio, 0, &bio_flags);
                }
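The switch from GFP_KERNEL to GFP_NOFS for add_to_page_cache_lru() keeps the page-cache allocation from recursing into filesystem reclaim while btrfs is in the middle of a readahead pass; a GFP_KERNEL allocation here could re-enter the filesystem and deadlock on locks already held.
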
@@ -2718,7 +2751,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
        u64 cur_end;
        struct extent_map *em;
        unsigned blocksize = 1 << inode->i_blkbits;
-       size_t page_offset = 0;
+       size_t pg_offset = 0;
        size_t block_off_start;
        size_t block_off_end;
        int err = 0;
@@ -2734,9 +2767,9 @@ int extent_prepare_write(struct extent_io_tree *tree,
 
        lock_extent(tree, page_start, page_end, GFP_NOFS);
        while (block_start <= block_end) {
-               em = get_extent(inode, page, page_offset, block_start,
+               em = get_extent(inode, page, pg_offset, block_start,
                                block_end - block_start + 1, 1);
-               if (IS_ERR(em) || !em)
+               if (IS_ERR_OR_NULL(em))
                        goto err;
 
                cur_end = min(block_end, extent_map_end(em) - 1);
@@ -2778,7 +2811,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
                                       block_start + iosize - 1,
                                       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
                        ret = submit_extent_page(READ, tree, page,
-                                        sector, iosize, page_offset, em->bdev,
+                                        sector, iosize, pg_offset, em->bdev,
                                         NULL, 1,
                                         end_bio_extent_preparewrite, 0,
                                         0, 0);
@@ -2787,12 +2820,15 @@ int extent_prepare_write(struct extent_io_tree *tree,
                        iocount++;
                        block_start = block_start + iosize;
                } else {
-                       set_extent_uptodate(tree, block_start, cur_end,
+                       struct extent_state *cached = NULL;
+
+                       set_extent_uptodate(tree, block_start, cur_end, &cached,
                                            GFP_NOFS);
-                       unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+                       unlock_extent_cached(tree, block_start, cur_end,
+                                            &cached, GFP_NOFS);
                        block_start = cur_end + 1;
                }
-               page_offset = block_start & (PAGE_CACHE_SIZE - 1);
+               pg_offset = block_start & (PAGE_CACHE_SIZE - 1);
                free_extent_map(em);
        }
        if (iocount) {
@@ -2863,7 +2899,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                        len = end - start + 1;
                        write_lock(&map->lock);
                        em = lookup_extent_mapping(map, start, len);
-                       if (!em || IS_ERR(em)) {
+                       if (IS_ERR_OR_NULL(em)) {
                                write_unlock(&map->lock);
                                break;
                        }
@@ -2906,7 +2942,7 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
        em = get_extent(inode, NULL, 0, start, blksize, 0);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
                             start + blksize - 1, &cached_state, GFP_NOFS);
-       if (!em || IS_ERR(em))
+       if (IS_ERR_OR_NULL(em))
                return 0;
 
        if (em->block_start > EXTENT_MAP_LAST_BYTE)
@@ -2940,7 +2976,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
                        break;
                len = (len + sectorsize - 1) & ~(sectorsize - 1);
                em = get_extent(inode, NULL, 0, offset, len, 0);
-               if (!em || IS_ERR(em))
+               if (IS_ERR_OR_NULL(em))
                        return em;
 
                /* if this isn't a hole return it */
@@ -3341,8 +3377,7 @@ free_eb:
 }
 
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-                                        u64 start, unsigned long len,
-                                         gfp_t mask)
+                                        u64 start, unsigned long len)
 {
        struct extent_buffer *eb;
 
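This hunk is the change named in the subject line: find_extent_buffer() only looks the buffer up in the tree's radix tree and takes a reference, it never allocates, so the gfp_t argument was dead weight. The same reasoning removed the mask from extent_io_tree_init() at the top of this diff. Callers simply drop the argument:

        eb = find_extent_buffer(tree, start, len);
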
@@ -3455,7 +3490,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
        num_pages = num_extent_pages(eb->start, eb->len);
 
        set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-                           GFP_NOFS);
+                           NULL, GFP_NOFS);
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3690,6 +3725,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                       "wanted %lu %lu\n", (unsigned long long)eb->start,
                       eb->len, start, min_len);
                WARN_ON(1);
+               return -EINVAL;
        }
 
        p = extent_buffer_page(eb, i);
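map_private_extent_buffer() previously fell through after the WARN_ON() and mapped a page for the out-of-range request anyway; returning -EINVAL turns the bad request into an explicit failure that callers can check.
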
@@ -3882,6 +3918,12 @@ static void move_pages(struct page *dst_page, struct page *src_page,
        kunmap_atomic(dst_kaddr, KM_USER0);
 }
 
+static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
+{
+       unsigned long distance = (src > dst) ? src - dst : dst - src;
+       return distance < len;
+}
+
 static void copy_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
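areas_overlap() reports whether two byte ranges of the same length len, starting at src and dst, intersect: they overlap exactly when the distance between the start offsets is smaller than len. A quick worked check (values illustrative):

        areas_overlap(100, 164, 64);    /* distance 64: [100,164) and [164,228) are disjoint -> false */
        areas_overlap(100, 163, 64);    /* distance 63 < 64: the ranges share byte 163 -> true */
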
@@ -3889,10 +3931,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
        char *src_kaddr;
 
-       if (dst_page != src_page)
+       if (dst_page != src_page) {
                src_kaddr = kmap_atomic(src_page, KM_USER1);
-       else
+       } else {
                src_kaddr = dst_kaddr;
+               BUG_ON(areas_overlap(src_off, dst_off, len));
+       }
 
        memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
        kunmap_atomic(dst_kaddr, KM_USER0);
@@ -3967,7 +4011,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                       "len %lu len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }
-       if (dst_offset < src_offset) {
+       if (!areas_overlap(src_offset, dst_offset, len)) {
                memcpy_extent_buffer(dst, dst_offset, src_offset, len);
                return;
        }
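
The old fast-path test `dst_offset < src_offset` could send overlapping ranges to memcpy_extent_buffer(), and memcpy() does not support overlapping source and destination; with the new BUG_ON() in copy_pages() such a copy would now trip. Gating the fast path on !areas_overlap() instead means memcpy is used only for disjoint ranges, and every overlapping move, in either direction, goes through the backwards copy loop in the remainder of memmove_extent_buffer().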