diff --git a/fs/mpage.c b/fs/mpage.c
index 778a4ddef77a21844b08af82058d3b188371dc01..09abba7653aa8db8189d05d7c2094b77ef1998a9 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                sector_t *last_block_in_bio, struct buffer_head *map_bh,
-               unsigned long *first_logical_block, get_block_t get_block)
+               unsigned long *first_logical_block, get_block_t get_block,
+               gfp_t gfp)
 {
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@ alloc_new:
                                goto out;
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-                               min_t(int, nr_pages, BIO_MAX_PAGES),
-                               GFP_KERNEL);
+                               min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
                if (bio == NULL)
                        goto confused;
        }
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
+       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+                                       page->index,
+                                       gfp)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, &map_bh,
                                        &first_logical_block,
-                                       get_block);
+                                       get_block, gfp);
                }
                page_cache_release(page);
        }
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
+       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-                       &map_bh, &first_logical_block, get_block);
+                       &map_bh, &first_logical_block, get_block, gfp);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
@@ -482,6 +485,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);
        int ret = 0;
+       int wr = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
 
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
@@ -590,7 +594,7 @@ page_is_mapped:
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && mpd->last_block_in_bio != blocks[0] - 1)
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
 
 alloc_new:
        if (bio == NULL) {
@@ -617,7 +621,7 @@ alloc_new:
        wbc_account_io(wbc, page, PAGE_SIZE);
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
                goto alloc_new;
        }
 
@@ -627,7 +631,7 @@ alloc_new:
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
@@ -639,7 +643,7 @@ alloc_new:
 
 confused:
        if (bio)
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
 
        if (mpd->use_writepage) {
                ret = mapping->a_ops->writepage(page, wbc);
@@ -695,8 +699,11 @@ mpage_writepages(struct address_space *mapping,
                };
 
                ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
-               if (mpd.bio)
-                       mpage_bio_submit(WRITE, mpd.bio);
+               if (mpd.bio) {
+                       int wr = (wbc->sync_mode == WB_SYNC_ALL ?
+                                 WRITE_SYNC : WRITE);
+                       mpage_bio_submit(wr, mpd.bio);
+               }
        }
        blk_finish_plug(&plug);
        return ret;
@@ -713,8 +720,11 @@ int mpage_writepage(struct page *page, get_block_t get_block,
                .use_writepage = 0,
        };
        int ret = __mpage_writepage(page, wbc, &mpd);
-       if (mpd.bio)
-               mpage_bio_submit(WRITE, mpd.bio);
+       if (mpd.bio) {
+               int wr = (wbc->sync_mode == WB_SYNC_ALL ?
+                         WRITE_SYNC : WRITE);
+               mpage_bio_submit(wr, mpd.bio);
+       }
        return ret;
 }
 EXPORT_SYMBOL(mpage_writepage);