/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get the physical address of the data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	sync_inode_page(dn);
	return 0;
}

static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}
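
/*
 * Illustrative example (added commentary, not in the original source):
 * with a cached extent {fofs = 8, blk_addr = 100, len = 6}, a lookup at
 * pgofs = 10 hits in check_extent_cache() and maps the buffer head to
 * block 100 + (10 - 8) = 102, with b_size covering the remaining
 * (8 + 6 - 1) - 10 + 1 = 4 contiguous blocks of the extent.
 */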

void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
}
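
/*
 * Illustrative example (added commentary): overwriting fofs = 12 inside a
 * cached extent {fofs = 10, blk_addr = 100, len = 8} takes the split path
 * in update_extent_cache() above; since end_fofs - fofs = 5 is not below
 * len / 2 = 4, the left part is discarded and the extent becomes
 * {fofs = 13, blk_addr = 103, len = 5}. If the surviving length falls
 * below F2FS_MIN_EXTENT_LEN, the cache is dropped and FI_NO_EXTENT
 * disables it for this inode.
 */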

struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has NEW_ADDR but no cached page */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
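
/*
 * Added note: with sync == false the read is submitted as READA and
 * find_data_page() returns without waiting, so the page may still be
 * locked and not yet uptodate; read_end_io() unlocks it on completion,
 * and callers must check PageUptodate() before using the contents.
 */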

/*
 * If this function tries to access a hole, it returns an error, because
 * the callers (the directory code in dir.c and GC) need to know whether
 * the page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page was allocated but could not be written, because
	 * its new inode page could not be allocated due to -ENOSPC.
	 * In that case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a mutex by calling
 * mutex_lock_op() and mutex_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
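
/*
 * Added note: the completion handler walks bi_io_vec from the last entry
 * to the first, prefetching the next page's flags word (which the upcoming
 * SetPageUptodate()/unlock_page() will touch) while the current page is
 * being finished, hiding a little cache-miss latency per page.
 */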

/*
 * Fill the locked page with data located at the block address.
 * Return an unlocked page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		up_read(&sbi->bio_sem);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}
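
/*
 * Illustrative example (added commentary): assuming 4KB blocks and
 * 512-byte sectors, SECTOR_FROM_BLOCK() converts a block address to a
 * sector number by shifting left by log2(sectors per block) = 3, so
 * blk_addr = 100 becomes bi_sector = 800.
 */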

/*
 * This function should be used by the data read flow only, and it does
 * not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS
 * readahead mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	f2fs_bug_on(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE(F2FS_I(inode)) :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
						dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}
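
/*
 * Illustrative example (added commentary): if the node page maps blocks
 * 500, 501, 502, 700, ... starting at ofs_in_node, the loop above stops
 * at i = 3, so the buffer head advertises a 3-block contiguous run and
 * mpage_readpages() can issue one larger bio instead of three single-block
 * ones.
 */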

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation policy is SSR, prefer an in-place
	 * write for updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
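
/*
 * Added note: the in-place path (rewrite_data_page) reuses old_blk_addr,
 * so neither the node page nor the extent cache changes; the out-of-place
 * path allocates new_blk_addr and must propagate it through
 * update_extent_cache(), which also dirties the node page.
 */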

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the page lies entirely beyond the end of file, it does not
	 * have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		int ilock = mutex_lock_op(sbi);
		err = do_write_data_page(page);
		mutex_unlock_op(sbi, ilock);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}
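
/*
 * Illustrative example (added commentary): with i_size = 10000 and 4KB
 * pages, end_index = 2 and offset = 10000 & 4095 = 1808; pages 0 and 1 go
 * straight to "write", page 2 is written after zeroing bytes 1808..4095,
 * and any dirty page beyond index 2 is skipped entirely.
 */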

#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}
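
/*
 * Illustrative example (added commentary): if the flusher passes
 * nr_to_write = 1024 to f2fs_write_data_pages(), it is bumped to
 * MAX_DESIRED_PAGES_WP (4096) and excess_nrtw = 3072; after
 * write_cache_pages() the excess is subtracted back, so the caller's
 * writeback accounting sees only its original quota while f2fs gets
 * larger, more sequential write batches.
 */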

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}
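
/*
 * Illustrative example (added commentary): a 100-byte write at pos = 5000
 * lands in page index 1 with start = 5000 & 4095 = 904 and end = 1004;
 * when the write begins past i_size there is nothing on disk to read, so
 * zero_user_segments() above clears bytes 0..903 and 1004..4095 instead
 * of issuing a read.
 */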

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};