/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}
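/*
 * Illustrative example (hypothetical numbers): with 4KB blocks and 512-byte
 * sectors, SECTOR_FROM_BLOCK() is a shift by 3, so block 0x100 lands at
 * sector 0x100 << 3 = 0x800. Consecutive block addresses therefore map to
 * consecutive sectors, which is what lets f2fs_submit_page_mbio() below
 * keep appending pages to one bio.
 */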
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
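/*
 * Usage sketch: callers flush whatever bio is still being merged for a
 * page type, e.g. before waiting for data writeback to drain:
 *
 *	f2fs_submit_merged_bio(sbi, DATA, WRITE);
 */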
/*
 * Fill the locked page with data located at the given block address.
 * The page is returned unlocked.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}
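/*
 * Typical caller pattern (sketch; the fio fields mirror what
 * find_data_page() below actually does):
 *
 *	struct f2fs_io_info fio = { .type = DATA, .rw = READ_SYNC };
 *
 *	fio.blk_addr = dn.data_blkaddr;
 *	err = f2fs_submit_page_bio(sbi, page, &fio);
 */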
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}
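/*
 * Merging rule, by example (hypothetical numbers): if the open bio ends at
 * block 0x2ff and the next page targets block 0x300 with the same rw flags,
 * the page is simply appended; any gap in the block addresses or an rw
 * mismatch forces __submit_merged_bio() first.
 */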
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	__set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	return 0;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
static void f2fs_map_bh(struct super_block *sb, pgoff_t pgofs,
			struct extent_info *ei, struct buffer_head *bh_result)
{
	unsigned int blkbits = sb->s_blocksize_bits;
	size_t count;

	set_buffer_new(bh_result);
	map_bh(bh_result, sb, ei->blk + pgofs - ei->fofs);
	count = ei->fofs + ei->len - pgofs;
	if (count < (UINT_MAX >> blkbits))
		bh_result->b_size = (count << blkbits);
	else
		bh_result->b_size = UINT_MAX;
}
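/*
 * Worked example (hypothetical values): for a cached extent
 * { fofs = 8, blk = 0x1000, len = 16 } and a lookup at pgofs = 10, the
 * mapped block is 0x1000 + 10 - 8 = 0x1002 and count = 8 + 16 - 10 = 14
 * remaining blocks, i.e. b_size = 14 << blkbits.
 */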
static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return false;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);
		return false;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		*ei = fi->ext;
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);
		return true;
	}
	read_unlock(&fi->ext_lock);
	return false;
}
static bool update_extent_info(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return false;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initialize the extent */
	if (fi->ext.len == 0) {
		if (blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk = blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext_lock);
	return need_update;
}
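/*
 * Split behavior, by example (hypothetical numbers): with a cached extent
 * { fofs = 0, blk = 100, len = 8 } and an update at fofs = 2, the shorter
 * side is dropped, leaving { fofs = 3, blk = 103, len = 5 }; updating at
 * fofs = 6 instead keeps the left part, { fofs = 0, blk = 100, len = 6 }.
 */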
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
		} else {
			et->cached_en = en;
			return en;
		}
	}
	return NULL;
}
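/*
 * The rb-tree walk above is the slow path; a hit on et->cached_en (the
 * most recently returned node) answers repeated lookups inside the same
 * extent in O(1) without touching the tree.
 */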
static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}
static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return __attach_extent_node(sbi, et, ei, parent, p);
}
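/*
 * Merge cases, by example (hypothetical numbers): inserting
 * { fofs = 10, blk = 0x50, len = 1 } next to an existing node
 * { fofs = 11, blk = 0x51, len = 4 } extends that node in place to
 * { fofs = 10, blk = 0x50, len = 5 } and then tries to absorb its rb-tree
 * neighbor as well; *den reports the node freed by that second merge.
 */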
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;

	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		return false;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		return false;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	read_lock(&et->lock);
	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
		stat_inc_read_hit(sbi->sb);
	}
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);

	atomic_dec(&et->refcount);
	return en ? true : false;
}
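/*
 * Note on the list_move_tail() above: sbi->extent_list is kept in LRU
 * order, so a cache hit rotates the node to the tail while the shrinker
 * below reclaims from the head, i.e. the coldest extents go first.
 */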
static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	struct extent_tree *et;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei;
	unsigned int endofs;

	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		return;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	write_lock(&et->lock);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > 1) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk + 1, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 were split from en; after several splits they become
	 * smaller and smaller fragments, so extents shorter than
	 * F2FS_MIN_EXTENT_LEN are not added back into the extent tree.
	 */
	if (en1 && list_empty(&en1->list))
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2 && list_empty(&en2->list))
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}
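/*
 * End-to-end example of the five steps (hypothetical numbers, assuming an
 * F2FS_MIN_EXTENT_LEN of 64): rewriting page 100 of a cached extent
 * { fofs = 0, blk = 0x500, len = 256 } to new block 0x900 detaches the old
 * node, re-inserts { 0, 0x500, 100 } and { 101, 0x565, 155 } as the split
 * halves, and inserts { 100, 0x900, 1 } for the updated page.
 */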
void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_iter iter;
	void **slot;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	if (available_free_memory(sbi, EXTENT_CACHE))
		return;

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!nr_shrink--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	down_read(&sbi->extent_tree_lock);
	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			atomic_inc(&et->refcount);
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);
			atomic_dec(&et->refcount);
		}
	}
	up_read(&sbi->extent_tree_lock);

	down_write(&sbi->extent_tree_lock);
	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
							F2FS_ROOT_INO(sbi)) {
		struct extent_tree *et = (struct extent_tree *)*slot;

		if (!atomic_read(&et->refcount) && !et->count) {
			radix_tree_delete(&sbi->extent_tree_root, et->ino);
			kmem_cache_free(extent_tree_slab, et);
			sbi->total_ext_tree--;
			tree_cnt++;
		}
	}
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
}
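/*
 * Reclaim order, in short: detach the nr_shrink coldest nodes from the LRU
 * list, sweep every tree freeing nodes that are off the list, then drop any
 * tree that is now empty and unreferenced. Since __free_extent_tree(..., false)
 * only frees nodes no longer on the LRU list, hot extents survive the pass.
 */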
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	unsigned int node_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		goto out;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	/* free all extent info belonging to this extent tree */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	atomic_dec(&et->refcount);

	/* try to find and delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_write(&sbi->extent_tree_lock);
		goto out;
	}
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}
static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return f2fs_lookup_extent_tree(inode, pgofs, ei);

	return lookup_extent_info(inode, pgofs, ei);
}
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn);

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
		return f2fs_update_extent_tree(dn->inode, fofs,
							dn->data_blkaddr);

	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
		sync_inode_page(dn);
}
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
/*
 * If the inode has a hole at this index, return an error so that the
 * callers (the dir.c functions and GC) can tell whether the page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page could not be allocated due to -ENOSPC.
	 * In that case, its blkaddr remains NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	__set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}
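/*
 * The i_size arithmetic above, by example: allocating the block that backs
 * page index fofs = 3 (with 4KB pages) must leave i_size at least
 * (3 + 1) << 12 = 16384 bytes, i.e. the end offset of that page.
 */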
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			if (dn.data_blkaddr == NULL_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
}
/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		f2fs_map_bh(inode->i_sb, pgofs, &ei, bh_result);
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}
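/*
 * Readahead merging, by example (hypothetical numbers): with
 * b_blocknr = 0x800 already mapped and ofs = 1, a next block address of
 * 0x801 grows b_size by one block and bumps ofs, so a single buffer_head
 * can describe a long run of consecutive blocks in one pass.
 */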
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, fio);
		f2fs_update_extent_cache(&dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);

	mapping_set_error(mapping, ret);
	return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}
static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
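/*
 * Alignment check, by example: reads always pass, while with a 4KB block
 * size the mask is 0xfff, so a direct write at offset 8192 passes
 * (8192 & 0xfff == 0) and one at offset 8200 is rejected
 * (8200 & 0xfff == 8).
 */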
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw & WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	return generic_block_bmap(mapping, block, get_data_block);
}
void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
}
int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}
void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};