/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

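/*
 * Illustrative sketch, not part of the original file: the usual caller
 * pattern for reserve_new_block(), mirroring what get_new_data_page()
 * below does. The helper name is hypothetical.
 */
static int __maybe_unused example_reserve_block(struct inode *inode,
						pgoff_t index)
{
	struct dnode_of_data dn;
	int err;

	/* Look up (allocating on the way, if needed) the dnode for index */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return err;

	/* Reserve an on-disk block only if none has been assigned yet */
	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	return err;
}
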
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	sbi->total_hit_ext++;
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		sbi->read_hit_ext++;
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

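/*
 * Worked example with assumed values: given a cached extent
 * {fofs = 100, blk_addr = 5000, len = 8}, a lookup of pgofs = 103 hits and
 * maps bh_result to block 5003 with count = 107 - 103 + 1 = 5, i.e. five
 * consecutive blocks reported in a single mapping.
 */
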
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initialize the extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}

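/*
 * Worked example with assumed values for the split case above: given the
 * extent {fofs = 100, blk_addr = 5000, len = 8}, rewriting fofs = 105 to a
 * non-contiguous address keeps the larger front part, so len becomes
 * 105 - 100 = 5; rewriting fofs = 102 keeps the back part instead, leaving
 * {fofs = 103, blk_addr = 5003, len = 5}.
 */
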
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has NEW_ADDR but no cached page */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If the requested page sits in a hole, return an error: the callers in
 * dir.c and in GC need to be able to tell whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		/* the page was truncated and reused; retry the lookup */
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

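/*
 * Illustrative sketch, not part of the original file: how a caller such as
 * the directory code is expected to consume get_lock_data_page(),
 * distinguishing a hole (-ENOENT) from a real I/O failure. The helper name
 * is hypothetical.
 */
static int __maybe_unused example_touch_page(struct inode *inode,
						pgoff_t index)
{
	struct page *page = get_lock_data_page(inode, index);

	if (IS_ERR(page))
		return PTR_ERR(page);	/* -ENOENT means a hole */

	/* ... the page is locked and uptodate here ... */
	f2fs_put_page(page, 1);
	return 0;
}
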
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a mutex by calling
 * mutex_lock_op() and mutex_unlock_op().
 */
struct page *get_new_data_page(struct inode *inode, pgoff_t index,
						bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		mark_inode_dirty_sync(inode);
	}
	return page;
}

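/*
 * Illustrative sketch, not part of the original file: get_new_data_page()
 * wrapped in mutex_lock_op()/mutex_unlock_op() as the comment above
 * requires. The helper name is hypothetical.
 */
static __maybe_unused struct page *example_new_page(struct inode *inode,
						pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	int ilock;

	ilock = mutex_lock_op(sbi);
	page = get_new_data_page(inode, index, false);
	mutex_unlock_op(sbi, ilock);
	return page;
}
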
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with the data located at the given block address.
 * The page is unlocked by the read completion handler.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

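/*
 * Worked example with assumed geometry: with 4KB blocks and 512-byte
 * sectors, SECTOR_FROM_BLOCK() multiplies the block address by 8 sectors
 * per block, so blk_addr = 100 submits a bio starting at sector 800.
 */
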
/*
 * This function should be used by the data read flow only, since it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
						dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

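/*
 * Worked example with assumed values: if the node page holds the
 * consecutive addresses 5000..5003 starting at dn.ofs_in_node and
 * maxblocks >= 4, the loop above stops at i = 4, so readahead gets one
 * four-block mapping instead of four single-block calls.
 */
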
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation policy needs SSR, it is better to do
	 * in-place writes for the updated data.
	 */
	if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
				need_inplace_update(inode)) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the page lies beyond the end of file, it does not have to be
	 * written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		int ilock = mutex_lock_op(sbi);
		err = do_write_data_page(page);
		mutex_unlock_op(sbi, ilock);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

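/*
 * Worked example with assumed values: with i_size = 10000 and 4KB pages,
 * end_index = 2 and offset = 10000 & 4095 = 1808. The tail page (index 2)
 * has bytes 1808..4095 zeroed before being written out, and any page with
 * index >= 3 is skipped entirely.
 */
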
#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

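/*
 * Worked example with assumed values: if the flusher asks for
 * wbc->nr_to_write = 1024, it is raised to MAX_DESIRED_PAGES_WP (4096) so
 * that more dirty pages are clustered into one submission; the surplus
 * excess_nrtw = 3072 is subtracted back afterwards so the caller's
 * accounting is preserved.
 */
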
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);
	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						  get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= nobh_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};