/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}
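
/*
 * f2fs caches one contiguous extent per inode (fi->ext): a run of file
 * blocks [fofs, fofs + len) mapped to consecutive on-disk blocks
 * starting at blk_addr.  check_extent_cache() serves block lookups
 * from this cache, and update_extent_cache() grows, shrinks, or splits
 * it whenever a block address changes.
 */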
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	sbi->total_hit_ext++;
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		sbi->read_hit_ext++;
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}
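
/*
 * A worked example for the cases below (values are illustrative):
 * with a cached extent of fofs=8, len=4, blk_addr=100, writing file
 * block 7 to disk block 99 front-merges to fofs=7, len=5, blk_addr=99;
 * writing file block 12 to disk block 104 back-merges to len=5; and
 * rewriting file block 9 to a non-adjacent disk block splits the
 * extent, keeping the larger remainder (blocks 10-11 here).
 */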
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}
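
/*
 * Find the cached data page for the given index, without locking it on
 * return.  With sync == true the read is issued with READ_SYNC and
 * waited for; otherwise it is posted as readahead (READA) and may
 * still be in flight when the page is returned.
 */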
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error: the callers in dir.c
 * and GC need to be able to tell whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		mark_inode_dirty_sync(inode);
	}
	return page;
}
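
/*
 * Read completion handler.  It walks the bio_vec array backwards from
 * the last segment, prefetching the next page's flags before finishing
 * the current one, and unlocks each page so that waiters observe its
 * final up-to-date or error state.
 */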
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with data located in the block address.
 * The page is unlocked by read_end_io() when the read completes.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only, where it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS
 * readahead mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the readahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
					dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}
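
/*
 * The ->readpage and ->readpages hooks are thin wrappers: the generic
 * mpage helpers drive the I/O and call back into get_data_block_ro()
 * for block mapping, which lets VFS readahead batch the consecutive
 * blocks mapped above.
 */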
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}
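
/*
 * Write one dirty data page to its final disk location.  The caller
 * holds the page lock.  Depending on the allocation policy, the page
 * is either rewritten in place (for SSR) or written out of place to a
 * newly allocated block, which is the default log-structured behavior.
 */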
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to write
	 * updated data in place.
	 */
	if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
				need_inplace_update(inode)) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
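
/*
 * The ->writepage entry.  Pages entirely beyond i_size are skipped,
 * the page straddling i_size is zeroed past EOF before being written,
 * and writes are redirtied with AOP_WRITEPAGE_ACTIVATE while recovery
 * from a sudden power-off (sbi->por_doing) is in progress.
 */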
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		int ilock = mutex_lock_op(sbi);
		err = do_write_data_page(page);
		mutex_unlock_op(sbi, ilock);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}
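
/* Lower bound imposed on wbc->nr_to_write so each pass batches enough pages */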
#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}
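
/*
 * ->write_begin pairs with nobh_write_end: it reserves the block
 * mapping under mutex_lock_op(), then brings the page up to date,
 * either by zeroing (new blocks, writes past EOF) or by a synchronous
 * read of the existing block.
 */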
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}
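
/*
 * Direct I/O is effectively read-only here: a direct write would race
 * with the cleaner moving its blocks, so returning 0 for WRITE makes
 * such requests fall back to the buffered path.
 */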
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
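
/*
 * Marking a data page dirty also registers directory inodes on the
 * dirty-dir list (via set_dirty_dir_page) so that checkpoint can flush
 * their dentry pages.
 */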
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= nobh_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};