f2fs: skip writing in ->writepages when no dirty pages exist
1 /*
2  * fs/f2fs/data.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/buffer_head.h>
14 #include <linux/mpage.h>
15 #include <linux/writeback.h>
16 #include <linux/backing-dev.h>
17 #include <linux/pagevec.h>
18 #include <linux/blkdev.h>
19 #include <linux/bio.h>
20 #include <linux/prefetch.h>
21 #include <linux/uio.h>
22 #include <linux/cleancache.h>
23
24 #include "f2fs.h"
25 #include "node.h"
26 #include "segment.h"
27 #include "trace.h"
28 #include <trace/events/f2fs.h>
29
30 static void f2fs_read_end_io(struct bio *bio, int err)
31 {
32         struct bio_vec *bvec;
33         int i;
34
35         if (f2fs_bio_encrypted(bio)) {
36                 if (err) {
37                         f2fs_release_crypto_ctx(bio->bi_private);
38                 } else {
39                         f2fs_end_io_crypto_work(bio->bi_private, bio);
40                         return;
41                 }
42         }
43
44         bio_for_each_segment_all(bvec, bio, i) {
45                 struct page *page = bvec->bv_page;
46
47                 if (!err) {
48                         SetPageUptodate(page);
49                 } else {
50                         ClearPageUptodate(page);
51                         SetPageError(page);
52                 }
53                 unlock_page(page);
54         }
55         bio_put(bio);
56 }
57
58 static void f2fs_write_end_io(struct bio *bio, int err)
59 {
60         struct f2fs_sb_info *sbi = bio->bi_private;
61         struct bio_vec *bvec;
62         int i;
63
64         bio_for_each_segment_all(bvec, bio, i) {
65                 struct page *page = bvec->bv_page;
66
67                 f2fs_restore_and_release_control_page(&page);
68
69                 if (unlikely(err)) {
70                         set_page_dirty(page);
71                         set_bit(AS_EIO, &page->mapping->flags);
72                         f2fs_stop_checkpoint(sbi);
73                 }
74                 end_page_writeback(page);
75                 dec_page_count(sbi, F2FS_WRITEBACK);
76         }
77
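           /* wake up a checkpoint waiter once no pages remain under writeback */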
78         if (!get_pages(sbi, F2FS_WRITEBACK) &&
79                         !list_empty(&sbi->cp_wait.task_list))
80                 wake_up(&sbi->cp_wait);
81
82         bio_put(bio);
83 }
84
85 /*
86  * Low-level block read/write IO operations.
87  */
88 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
89                                 int npages, bool is_read)
90 {
91         struct bio *bio;
92
93         /* No failure on bio allocation */
94         bio = bio_alloc(GFP_NOIO, npages);
95
96         bio->bi_bdev = sbi->sb->s_bdev;
97         bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
98         bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
99         bio->bi_private = is_read ? NULL : sbi;
100
101         return bio;
102 }
103
104 static void __submit_merged_bio(struct f2fs_bio_info *io)
105 {
106         struct f2fs_io_info *fio = &io->fio;
107
108         if (!io->bio)
109                 return;
110
111         if (is_read_io(fio->rw))
112                 trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
113         else
114                 trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
115
116         submit_bio(fio->rw, io->bio);
117         io->bio = NULL;
118 }
119
120 void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
121                                 enum page_type type, int rw)
122 {
123         enum page_type btype = PAGE_TYPE_OF_BIO(type);
124         struct f2fs_bio_info *io;
125
126         io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
127
128         down_write(&io->io_rwsem);
129
130         /* change META to META_FLUSH in the checkpoint procedure */
131         if (type >= META_FLUSH) {
132                 io->fio.type = META_FLUSH;
133                 if (test_opt(sbi, NOBARRIER))
134                         io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
135                 else
136                         io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
137         }
138         __submit_merged_bio(io);
139         up_write(&io->io_rwsem);
140 }
141
142 /*
143  * Fill the locked page with data located at the given block address.
144  * Return the page unlocked.
145  */
146 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
147 {
148         struct bio *bio;
149         struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
150
151         trace_f2fs_submit_page_bio(page, fio);
152         f2fs_trace_ios(fio, 0);
153
154         /* Allocate a new bio */
155         bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));
156
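            /* a single-page bio must take the whole page; otherwise give up */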
157         if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
158                 bio_put(bio);
159                 return -EFAULT;
160         }
161
162         submit_bio(fio->rw, bio);
163         return 0;
164 }
165
166 void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
167 {
168         struct f2fs_sb_info *sbi = fio->sbi;
169         enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
170         struct f2fs_bio_info *io;
171         bool is_read = is_read_io(fio->rw);
172         struct page *bio_page;
173
174         io = is_read ? &sbi->read_io : &sbi->write_io[btype];
175
176         verify_block_addr(sbi, fio->blk_addr);
177
178         down_write(&io->io_rwsem);
179
180         if (!is_read)
181                 inc_page_count(sbi, F2FS_WRITEBACK);
182
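            /* submit the pending bio if the block is not contiguous or rw differs */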
183         if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
184                                                 io->fio.rw != fio->rw))
185                 __submit_merged_bio(io);
186 alloc_new:
187         if (io->bio == NULL) {
188                 int bio_blocks = MAX_BIO_BLOCKS(sbi);
189
190                 io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
191                 io->fio = *fio;
192         }
193
194         bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
195
196         if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
197                                                         PAGE_CACHE_SIZE) {
198                 __submit_merged_bio(io);
199                 goto alloc_new;
200         }
201
202         io->last_block_in_bio = fio->blk_addr;
203         f2fs_trace_ios(fio, 0);
204
205         up_write(&io->io_rwsem);
206         trace_f2fs_submit_page_mbio(fio->page, fio);
207 }
208
209 /*
210  * Lock ordering for the change of data block address:
211  * ->data_page
212  *  ->node_page
213  *    update block addresses in the node page
214  */
215 void set_data_blkaddr(struct dnode_of_data *dn)
216 {
217         struct f2fs_node *rn;
218         __le32 *addr_array;
219         struct page *node_page = dn->node_page;
220         unsigned int ofs_in_node = dn->ofs_in_node;
221
222         f2fs_wait_on_page_writeback(node_page, NODE);
223
224         rn = F2FS_NODE(node_page);
225
226         /* Get physical address of data block */
227         addr_array = blkaddr_in_node(rn);
228         addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
229         set_page_dirty(node_page);
230 }
231
232 int reserve_new_block(struct dnode_of_data *dn)
233 {
234         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
235
236         if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
237                 return -EPERM;
238         if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
239                 return -ENOSPC;
240
241         trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
242
243         dn->data_blkaddr = NEW_ADDR;
244         set_data_blkaddr(dn);
245         mark_inode_dirty(dn->inode);
246         sync_inode_page(dn);
247         return 0;
248 }
249
250 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
251 {
252         bool need_put = dn->inode_page ? false : true;
253         int err;
254
255         err = get_dnode_of_data(dn, index, ALLOC_NODE);
256         if (err)
257                 return err;
258
259         if (dn->data_blkaddr == NULL_ADDR)
260                 err = reserve_new_block(dn);
261         if (err || need_put)
262                 f2fs_put_dnode(dn);
263         return err;
264 }
265
266 struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
267 {
268         struct address_space *mapping = inode->i_mapping;
269         struct dnode_of_data dn;
270         struct page *page;
271         struct extent_info ei;
272         int err;
273         struct f2fs_io_info fio = {
274                 .sbi = F2FS_I_SB(inode),
275                 .type = DATA,
276                 .rw = rw,
277                 .encrypted_page = NULL,
278         };
279
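            /* encrypted regular files are read via ->readpage so decryption is applied */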
280         if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
281                 return read_mapping_page(mapping, index, NULL);
282
283         page = grab_cache_page(mapping, index);
284         if (!page)
285                 return ERR_PTR(-ENOMEM);
286
287         if (f2fs_lookup_extent_cache(inode, index, &ei)) {
288                 dn.data_blkaddr = ei.blk + index - ei.fofs;
289                 goto got_it;
290         }
291
292         set_new_dnode(&dn, inode, NULL, NULL, 0);
293         err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
294         if (err)
295                 goto put_err;
296         f2fs_put_dnode(&dn);
297
298         if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
299                 err = -ENOENT;
300                 goto put_err;
301         }
302 got_it:
303         if (PageUptodate(page)) {
304                 unlock_page(page);
305                 return page;
306         }
307
308         /*
309          * A new dentry page is allocated but cannot be written, since its
310          * new inode page couldn't be allocated due to -ENOSPC.
311          * In such a case, its blkaddr remains NEW_ADDR.
312          * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
313          */
314         if (dn.data_blkaddr == NEW_ADDR) {
315                 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
316                 SetPageUptodate(page);
317                 unlock_page(page);
318                 return page;
319         }
320
321         fio.blk_addr = dn.data_blkaddr;
322         fio.page = page;
323         err = f2fs_submit_page_bio(&fio);
324         if (err)
325                 goto put_err;
326         return page;
327
328 put_err:
329         f2fs_put_page(page, 1);
330         return ERR_PTR(err);
331 }
332
333 struct page *find_data_page(struct inode *inode, pgoff_t index)
334 {
335         struct address_space *mapping = inode->i_mapping;
336         struct page *page;
337
338         page = find_get_page(mapping, index);
339         if (page && PageUptodate(page))
340                 return page;
341         f2fs_put_page(page, 0);
342
343         page = get_read_data_page(inode, index, READ_SYNC);
344         if (IS_ERR(page))
345                 return page;
346
347         if (PageUptodate(page))
348                 return page;
349
350         wait_on_page_locked(page);
351         if (unlikely(!PageUptodate(page))) {
352                 f2fs_put_page(page, 0);
353                 return ERR_PTR(-EIO);
354         }
355         return page;
356 }
357
358 /*
359  * If it tries to access a hole, return an error.
360  * The callers (functions in dir.c and GC) need to know whether
361  * this page exists or not.
362  */
363 struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
364 {
365         struct address_space *mapping = inode->i_mapping;
366         struct page *page;
367 repeat:
368         page = get_read_data_page(inode, index, READ_SYNC);
369         if (IS_ERR(page))
370                 return page;
371
372         /* wait for read completion */
373         lock_page(page);
374         if (unlikely(!PageUptodate(page))) {
375                 f2fs_put_page(page, 1);
376                 return ERR_PTR(-EIO);
377         }
378         if (unlikely(page->mapping != mapping)) {
379                 f2fs_put_page(page, 1);
380                 goto repeat;
381         }
382         return page;
383 }
384
385 /*
386  * Caller ensures that this data page is never allocated.
387  * A new zero-filled data page is allocated in the page cache.
388  *
389  * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
390  * and f2fs_unlock_op().
391  * Note that ipage is set only by make_empty_dir.
392  */
393 struct page *get_new_data_page(struct inode *inode,
394                 struct page *ipage, pgoff_t index, bool new_i_size)
395 {
396         struct address_space *mapping = inode->i_mapping;
397         struct page *page;
398         struct dnode_of_data dn;
399         int err;
400 repeat:
401         page = grab_cache_page(mapping, index);
402         if (!page)
403                 return ERR_PTR(-ENOMEM);
404
405         set_new_dnode(&dn, inode, ipage, NULL, 0);
406         err = f2fs_reserve_block(&dn, index);
407         if (err) {
408                 f2fs_put_page(page, 1);
409                 return ERR_PTR(err);
410         }
411         if (!ipage)
412                 f2fs_put_dnode(&dn);
413
414         if (PageUptodate(page))
415                 goto got_it;
416
417         if (dn.data_blkaddr == NEW_ADDR) {
418                 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
419                 SetPageUptodate(page);
420         } else {
421                 f2fs_put_page(page, 1);
422
423                 page = get_read_data_page(inode, index, READ_SYNC);
424                 if (IS_ERR(page))
425                         goto repeat;
426
427                 /* wait for read completion */
428                 lock_page(page);
429         }
430 got_it:
431         if (new_i_size &&
432                 i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
433                 i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
434                 /* Only the directory inode sets new_i_size */
435                 set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
436         }
437         return page;
438 }
439
440 static int __allocate_data_block(struct dnode_of_data *dn)
441 {
442         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
443         struct f2fs_inode_info *fi = F2FS_I(dn->inode);
444         struct f2fs_summary sum;
445         struct node_info ni;
446         int seg = CURSEG_WARM_DATA;
447         pgoff_t fofs;
448
449         if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
450                 return -EPERM;
451
452         dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
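            /* a NEW_ADDR block was already reserved, so skip the free block check */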
453         if (dn->data_blkaddr == NEW_ADDR)
454                 goto alloc;
455
456         if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
457                 return -ENOSPC;
458
459 alloc:
460         get_node_info(sbi, dn->nid, &ni);
461         set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
462
463         if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
464                 seg = CURSEG_DIRECT_IO;
465
466         allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
467                                                                 &sum, seg);
468         set_data_blkaddr(dn);
469
470         /* update i_size */
471         fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
472                                                         dn->ofs_in_node;
473         if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
474                 i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
475
476         /* direct IO doesn't use the extent cache, to maximize performance */
477         f2fs_drop_largest_extent(dn->inode, fofs);
478
479         return 0;
480 }
481
482 static void __allocate_data_blocks(struct inode *inode, loff_t offset,
483                                                         size_t count)
484 {
485         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
486         struct dnode_of_data dn;
487         u64 start = F2FS_BYTES_TO_BLK(offset);
488         u64 len = F2FS_BYTES_TO_BLK(count);
489         bool allocated;
490         u64 end_offset;
491
492         while (len) {
493                 f2fs_balance_fs(sbi);
494                 f2fs_lock_op(sbi);
495
496                 /* When reading holes, we need its node page */
497                 set_new_dnode(&dn, inode, NULL, NULL, 0);
498                 if (get_dnode_of_data(&dn, start, ALLOC_NODE))
499                         goto out;
500
501                 allocated = false;
502                 end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
503
504                 while (dn.ofs_in_node < end_offset && len) {
505                         block_t blkaddr;
506
507                         blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
508                         if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
509                                 if (__allocate_data_block(&dn))
510                                         goto sync_out;
511                                 allocated = true;
512                         }
513                         len--;
514                         start++;
515                         dn.ofs_in_node++;
516                 }
517
518                 if (allocated)
519                         sync_inode_page(&dn);
520
521                 f2fs_put_dnode(&dn);
522                 f2fs_unlock_op(sbi);
523         }
524         return;
525
526 sync_out:
527         if (allocated)
528                 sync_inode_page(&dn);
529         f2fs_put_dnode(&dn);
530 out:
531         f2fs_unlock_op(sbi);
532         return;
533 }
534
535 /*
536  * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
537  * the f2fs_map_blocks structure.
538  * If original data blocks are allocated, then give them to blockdev.
539  * Otherwise,
540  *     a. preallocate requested block addresses
541  *     b. do not use extent cache for better performance
542  *     c. give the block addresses to blockdev
543  */
544 static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
545                         int create, bool fiemap)
546 {
547         unsigned int maxblocks = map->m_len;
548         struct dnode_of_data dn;
549         int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
550         pgoff_t pgofs, end_offset;
551         int err = 0, ofs = 1;
552         struct extent_info ei;
553         bool allocated = false;
554
555         map->m_len = 0;
556         map->m_flags = 0;
557
558         /* it only supports block size == page size */
559         pgofs = (pgoff_t)map->m_lblk;
560
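            /* an extent cache hit resolves the mapping without reading a node page */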
561         if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
562                 map->m_pblk = ei.blk + pgofs - ei.fofs;
563                 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
564                 map->m_flags = F2FS_MAP_MAPPED;
565                 goto out;
566         }
567
568         if (create)
569                 f2fs_lock_op(F2FS_I_SB(inode));
570
571         /* When reading holes, we need its node page */
572         set_new_dnode(&dn, inode, NULL, NULL, 0);
573         err = get_dnode_of_data(&dn, pgofs, mode);
574         if (err) {
575                 if (err == -ENOENT)
576                         err = 0;
577                 goto unlock_out;
578         }
579         if (dn.data_blkaddr == NEW_ADDR && !fiemap)
580                 goto put_out;
581
582         if (dn.data_blkaddr != NULL_ADDR) {
583                 map->m_flags = F2FS_MAP_MAPPED;
584                 map->m_pblk = dn.data_blkaddr;
585                 if (dn.data_blkaddr == NEW_ADDR)
586                         map->m_flags |= F2FS_MAP_UNWRITTEN;
587         } else if (create) {
588                 err = __allocate_data_block(&dn);
589                 if (err)
590                         goto put_out;
591                 allocated = true;
592                 map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
593                 map->m_pblk = dn.data_blkaddr;
594         } else {
595                 goto put_out;
596         }
597
598         end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
599         map->m_len = 1;
600         dn.ofs_in_node++;
601         pgofs++;
602
603 get_next:
604         if (dn.ofs_in_node >= end_offset) {
605                 if (allocated)
606                         sync_inode_page(&dn);
607                 allocated = false;
608                 f2fs_put_dnode(&dn);
609
610                 set_new_dnode(&dn, inode, NULL, NULL, 0);
611                 err = get_dnode_of_data(&dn, pgofs, mode);
612                 if (err) {
613                         if (err == -ENOENT)
614                                 err = 0;
615                         goto unlock_out;
616                 }
617                 if (dn.data_blkaddr == NEW_ADDR && !fiemap)
618                         goto put_out;
619
620                 end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
621         }
622
623         if (maxblocks > map->m_len) {
624                 block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
625                 if (blkaddr == NULL_ADDR && create) {
626                         err = __allocate_data_block(&dn);
627                         if (err)
628                                 goto sync_out;
629                         allocated = true;
630                         map->m_flags |= F2FS_MAP_NEW;
631                         blkaddr = dn.data_blkaddr;
632                 }
633                 /* Give more consecutive addresses for the readahead */
634                 if ((map->m_pblk != NEW_ADDR &&
635                                 blkaddr == (map->m_pblk + ofs)) ||
636                                 (map->m_pblk == NEW_ADDR &&
637                                 blkaddr == NEW_ADDR)) {
638                         ofs++;
639                         dn.ofs_in_node++;
640                         pgofs++;
641                         map->m_len++;
642                         goto get_next;
643                 }
644         }
645 sync_out:
646         if (allocated)
647                 sync_inode_page(&dn);
648 put_out:
649         f2fs_put_dnode(&dn);
650 unlock_out:
651         if (create)
652                 f2fs_unlock_op(F2FS_I_SB(inode));
653 out:
654         trace_f2fs_map_blocks(inode, map, err);
655         return err;
656 }
657
658 static int __get_data_block(struct inode *inode, sector_t iblock,
659                         struct buffer_head *bh, int create, bool fiemap)
660 {
661         struct f2fs_map_blocks map;
662         int ret;
663
664         map.m_lblk = iblock;
665         map.m_len = bh->b_size >> inode->i_blkbits;
666
667         ret = f2fs_map_blocks(inode, &map, create, fiemap);
668         if (!ret) {
669                 map_bh(bh, inode->i_sb, map.m_pblk);
670                 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
671                 bh->b_size = map.m_len << inode->i_blkbits;
672         }
673         return ret;
674 }
675
676 static int get_data_block(struct inode *inode, sector_t iblock,
677                         struct buffer_head *bh_result, int create)
678 {
679         return __get_data_block(inode, iblock, bh_result, create, false);
680 }
681
682 static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
683                         struct buffer_head *bh_result, int create)
684 {
685         return __get_data_block(inode, iblock, bh_result, create, true);
686 }
687
688 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
689 {
690         return (offset >> inode->i_blkbits);
691 }
692
693 static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
694 {
695         return (blk << inode->i_blkbits);
696 }
697
698 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
699                 u64 start, u64 len)
700 {
701         struct buffer_head map_bh;
702         sector_t start_blk, last_blk;
703         loff_t isize = i_size_read(inode);
704         u64 logical = 0, phys = 0, size = 0;
705         u32 flags = 0;
706         bool past_eof = false, whole_file = false;
707         int ret = 0;
708
709         ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
710         if (ret)
711                 return ret;
712
713         mutex_lock(&inode->i_mutex);
714
715         if (len >= isize) {
716                 whole_file = true;
717                 len = isize;
718         }
719
720         if (logical_to_blk(inode, len) == 0)
721                 len = blk_to_logical(inode, 1);
722
723         start_blk = logical_to_blk(inode, start);
724         last_blk = logical_to_blk(inode, start + len - 1);
725 next:
726         memset(&map_bh, 0, sizeof(struct buffer_head));
727         map_bh.b_size = len;
728
729         ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
730         if (ret)
731                 goto out;
732
733         /* HOLE */
734         if (!buffer_mapped(&map_bh)) {
735                 start_blk++;
736
737                 if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
738                         past_eof = 1;
739
740                 if (past_eof && size) {
741                         flags |= FIEMAP_EXTENT_LAST;
742                         ret = fiemap_fill_next_extent(fieinfo, logical,
743                                         phys, size, flags);
744                 } else if (size) {
745                         ret = fiemap_fill_next_extent(fieinfo, logical,
746                                         phys, size, flags);
747                         size = 0;
748                 }
749
750                 /* if we have holes up to/past EOF then we're done */
751                 if (start_blk > last_blk || past_eof || ret)
752                         goto out;
753         } else {
754                 if (start_blk > last_blk && !whole_file) {
755                         ret = fiemap_fill_next_extent(fieinfo, logical,
756                                         phys, size, flags);
757                         goto out;
758                 }
759
760                 /*
761                  * if size != 0 then we know we already have an extent
762                  * to add, so add it.
763                  */
764                 if (size) {
765                         ret = fiemap_fill_next_extent(fieinfo, logical,
766                                         phys, size, flags);
767                         if (ret)
768                                 goto out;
769                 }
770
771                 logical = blk_to_logical(inode, start_blk);
772                 phys = blk_to_logical(inode, map_bh.b_blocknr);
773                 size = map_bh.b_size;
774                 flags = 0;
775                 if (buffer_unwritten(&map_bh))
776                         flags = FIEMAP_EXTENT_UNWRITTEN;
777
778                 start_blk += logical_to_blk(inode, size);
779
780                 /*
781                  * If we are past the EOF, then we need to make sure as
782                  * soon as we find a hole that the last extent we found
783                  * is marked with FIEMAP_EXTENT_LAST
784                  */
785                 if (!past_eof && logical + size >= isize)
786                         past_eof = true;
787         }
788         cond_resched();
789         if (fatal_signal_pending(current))
790                 ret = -EINTR;
791         else
792                 goto next;
793 out:
794         if (ret == 1)
795                 ret = 0;
796
797         mutex_unlock(&inode->i_mutex);
798         return ret;
799 }
800
801 /*
802  * This function was originally taken from fs/mpage.c, and customized for f2fs.
803  * The major change is that block_size == page_size in f2fs by default.
804  */
805 static int f2fs_mpage_readpages(struct address_space *mapping,
806                         struct list_head *pages, struct page *page,
807                         unsigned nr_pages)
808 {
809         struct bio *bio = NULL;
810         unsigned page_idx;
811         sector_t last_block_in_bio = 0;
812         struct inode *inode = mapping->host;
813         const unsigned blkbits = inode->i_blkbits;
814         const unsigned blocksize = 1 << blkbits;
815         sector_t block_in_file;
816         sector_t last_block;
817         sector_t last_block_in_file;
818         sector_t block_nr;
819         struct block_device *bdev = inode->i_sb->s_bdev;
820         struct f2fs_map_blocks map;
821
822         map.m_pblk = 0;
823         map.m_lblk = 0;
824         map.m_len = 0;
825         map.m_flags = 0;
826
827         for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
828
829                 prefetchw(&page->flags);
830                 if (pages) {
831                         page = list_entry(pages->prev, struct page, lru);
832                         list_del(&page->lru);
833                         if (add_to_page_cache_lru(page, mapping,
834                                                   page->index, GFP_KERNEL))
835                                 goto next_page;
836                 }
837
838                 block_in_file = (sector_t)page->index;
839                 last_block = block_in_file + nr_pages;
840                 last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
841                                                                 blkbits;
842                 if (last_block > last_block_in_file)
843                         last_block = last_block_in_file;
844
845                 /*
846                  * Map blocks using the previous result first.
847                  */
848                 if ((map.m_flags & F2FS_MAP_MAPPED) &&
849                                 block_in_file > map.m_lblk &&
850                                 block_in_file < (map.m_lblk + map.m_len))
851                         goto got_it;
852
853                 /*
854                  * Then do more f2fs_map_blocks() calls until we are
855                  * done with this page.
856                  */
857                 map.m_flags = 0;
858
859                 if (block_in_file < last_block) {
860                         map.m_lblk = block_in_file;
861                         map.m_len = last_block - block_in_file;
862
863                         if (f2fs_map_blocks(inode, &map, 0, false))
864                                 goto set_error_page;
865                 }
866 got_it:
867                 if ((map.m_flags & F2FS_MAP_MAPPED)) {
868                         block_nr = map.m_pblk + block_in_file - map.m_lblk;
869                         SetPageMappedToDisk(page);
870
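                            /* a cleancache hit fills the page, so skip the read IO */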
871                         if (!PageUptodate(page) && !cleancache_get_page(page)) {
872                                 SetPageUptodate(page);
873                                 goto confused;
874                         }
875                 } else {
876                         zero_user_segment(page, 0, PAGE_CACHE_SIZE);
877                         SetPageUptodate(page);
878                         unlock_page(page);
879                         goto next_page;
880                 }
881
882                 /*
883                  * This page will go to BIO.  Do we need to send this
884                  * BIO off first?
885                  */
886                 if (bio && (last_block_in_bio != block_nr - 1)) {
887 submit_and_realloc:
888                         submit_bio(READ, bio);
889                         bio = NULL;
890                 }
891                 if (bio == NULL) {
892                         struct f2fs_crypto_ctx *ctx = NULL;
893
894                         if (f2fs_encrypted_inode(inode) &&
895                                         S_ISREG(inode->i_mode)) {
896                                 struct page *cpage;
897
898                                 ctx = f2fs_get_crypto_ctx(inode);
899                                 if (IS_ERR(ctx))
900                                         goto set_error_page;
901
902                                 /* wait for the page to be moved by cleaning */
903                                 cpage = find_lock_page(
904                                                 META_MAPPING(F2FS_I_SB(inode)),
905                                                 block_nr);
906                                 if (cpage) {
907                                         f2fs_wait_on_page_writeback(cpage,
908                                                                         DATA);
909                                         f2fs_put_page(cpage, 1);
910                                 }
911                         }
912
913                         bio = bio_alloc(GFP_KERNEL,
914                                 min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
915                         if (!bio) {
916                                 if (ctx)
917                                         f2fs_release_crypto_ctx(ctx);
918                                 goto set_error_page;
919                         }
920                         bio->bi_bdev = bdev;
921                         bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
922                         bio->bi_end_io = f2fs_read_end_io;
923                         bio->bi_private = ctx;
924                 }
925
926                 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
927                         goto submit_and_realloc;
928
929                 last_block_in_bio = block_nr;
930                 goto next_page;
931 set_error_page:
932                 SetPageError(page);
933                 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
934                 unlock_page(page);
935                 goto next_page;
936 confused:
937                 if (bio) {
938                         submit_bio(READ, bio);
939                         bio = NULL;
940                 }
941                 unlock_page(page);
942 next_page:
943                 if (pages)
944                         page_cache_release(page);
945         }
946         BUG_ON(pages && !list_empty(pages));
947         if (bio)
948                 submit_bio(READ, bio);
949         return 0;
950 }
951
952 static int f2fs_read_data_page(struct file *file, struct page *page)
953 {
954         struct inode *inode = page->mapping->host;
955         int ret = -EAGAIN;
956
957         trace_f2fs_readpage(page, DATA);
958
959         /* If the file has inline data, try to read it directly */
960         if (f2fs_has_inline_data(inode))
961                 ret = f2fs_read_inline_data(inode, page);
962         if (ret == -EAGAIN)
963                 ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
964         return ret;
965 }
966
967 static int f2fs_read_data_pages(struct file *file,
968                         struct address_space *mapping,
969                         struct list_head *pages, unsigned nr_pages)
970 {
971         struct inode *inode = file->f_mapping->host;
972
973         /* If the file has inline data, skip readpages */
974         if (f2fs_has_inline_data(inode))
975                 return 0;
976
977         return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
978 }
979
980 int do_write_data_page(struct f2fs_io_info *fio)
981 {
982         struct page *page = fio->page;
983         struct inode *inode = page->mapping->host;
984         struct dnode_of_data dn;
985         int err = 0;
986
987         set_new_dnode(&dn, inode, NULL, NULL, 0);
988         err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
989         if (err)
990                 return err;
991
992         fio->blk_addr = dn.data_blkaddr;
993
994         /* This page is already truncated */
995         if (fio->blk_addr == NULL_ADDR) {
996                 ClearPageUptodate(page);
997                 goto out_writepage;
998         }
999
1000         if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1001                 fio->encrypted_page = f2fs_encrypt(inode, fio->page);
1002                 if (IS_ERR(fio->encrypted_page)) {
1003                         err = PTR_ERR(fio->encrypted_page);
1004                         goto out_writepage;
1005                 }
1006         }
1007
1008         set_page_writeback(page);
1009
1010         /*
1011          * If the current allocation needs SSR,
1012          * it is better to do in-place writes for the updated data.
1013          */
1014         if (unlikely(fio->blk_addr != NEW_ADDR &&
1015                         !is_cold_data(page) &&
1016                         need_inplace_update(inode))) {
1017                 rewrite_data_page(fio);
1018                 set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
1019                 trace_f2fs_do_write_data_page(page, IPU);
1020         } else {
1021                 write_data_page(&dn, fio);
1022                 set_data_blkaddr(&dn);
1023                 f2fs_update_extent_cache(&dn);
1024                 trace_f2fs_do_write_data_page(page, OPU);
1025                 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
1026                 if (page->index == 0)
1027                         set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
1028         }
1029 out_writepage:
1030         f2fs_put_dnode(&dn);
1031         return err;
1032 }
1033
1034 static int f2fs_write_data_page(struct page *page,
1035                                         struct writeback_control *wbc)
1036 {
1037         struct inode *inode = page->mapping->host;
1038         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1039         loff_t i_size = i_size_read(inode);
1040         const pgoff_t end_index = ((unsigned long long) i_size)
1041                                                         >> PAGE_CACHE_SHIFT;
1042         unsigned offset = 0;
1043         bool need_balance_fs = false;
1044         int err = 0;
1045         struct f2fs_io_info fio = {
1046                 .sbi = sbi,
1047                 .type = DATA,
1048                 .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
1049                 .page = page,
1050                 .encrypted_page = NULL,
1051         };
1052
1053         trace_f2fs_writepage(page, DATA);
1054
1055         if (page->index < end_index)
1056                 goto write;
1057
1058         /*
1059          * If the offset is beyond the file size,
1060          * this page does not have to be written to disk.
1061          */
1062         offset = i_size & (PAGE_CACHE_SIZE - 1);
1063         if ((page->index >= end_index + 1) || !offset)
1064                 goto out;
1065
1066         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1067 write:
1068         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1069                 goto redirty_out;
1070         if (f2fs_is_drop_cache(inode))
1071                 goto out;
1072         if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
1073                         available_free_memory(sbi, BASE_CHECK))
1074                 goto redirty_out;
1075
1076         /* Dentry blocks are controlled by checkpoint */
1077         if (S_ISDIR(inode->i_mode)) {
1078                 if (unlikely(f2fs_cp_error(sbi)))
1079                         goto redirty_out;
1080                 err = do_write_data_page(&fio);
1081                 goto done;
1082         }
1083
1084         /* we should bypass data pages to let the kworker jobs proceed */
1085         if (unlikely(f2fs_cp_error(sbi))) {
1086                 SetPageError(page);
1087                 goto out;
1088         }
1089
1090         if (!wbc->for_reclaim)
1091                 need_balance_fs = true;
1092         else if (has_not_enough_free_secs(sbi, 0))
1093                 goto redirty_out;
1094
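             /* write inline data first if present; -EAGAIN falls back to a data page */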
1095         err = -EAGAIN;
1096         f2fs_lock_op(sbi);
1097         if (f2fs_has_inline_data(inode))
1098                 err = f2fs_write_inline_data(inode, page);
1099         if (err == -EAGAIN)
1100                 err = do_write_data_page(&fio);
1101         f2fs_unlock_op(sbi);
1102 done:
1103         if (err && err != -ENOENT)
1104                 goto redirty_out;
1105
1106         clear_cold_data(page);
1107 out:
1108         inode_dec_dirty_pages(inode);
1109         if (err)
1110                 ClearPageUptodate(page);
1111         unlock_page(page);
1112         if (need_balance_fs)
1113                 f2fs_balance_fs(sbi);
1114         if (wbc->for_reclaim)
1115                 f2fs_submit_merged_bio(sbi, DATA, WRITE);
1116         return 0;
1117
1118 redirty_out:
1119         redirty_page_for_writepage(wbc, page);
1120         return AOP_WRITEPAGE_ACTIVATE;
1121 }
1122
1123 static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
1124                         void *data)
1125 {
1126         struct address_space *mapping = data;
1127         int ret = mapping->a_ops->writepage(page, wbc);
1128         mapping_set_error(mapping, ret);
1129         return ret;
1130 }
1131
1132 /*
1133  * This function was copied from write_cache_pages in mm/page-writeback.c.
1134  * The major change is that cold data pages are written in a separate pass
1135  * from warm/hot data pages.
1136  */
1137 static int f2fs_write_cache_pages(struct address_space *mapping,
1138                         struct writeback_control *wbc, writepage_t writepage,
1139                         void *data)
1140 {
1141         int ret = 0;
1142         int done = 0;
1143         struct pagevec pvec;
1144         int nr_pages;
1145         pgoff_t uninitialized_var(writeback_index);
1146         pgoff_t index;
1147         pgoff_t end;            /* Inclusive */
1148         pgoff_t done_index;
1149         int cycled;
1150         int range_whole = 0;
1151         int tag;
1152         int step = 0;
1153
1154         pagevec_init(&pvec, 0);
1155 next:
1156         if (wbc->range_cyclic) {
1157                 writeback_index = mapping->writeback_index; /* prev offset */
1158                 index = writeback_index;
1159                 if (index == 0)
1160                         cycled = 1;
1161                 else
1162                         cycled = 0;
1163                 end = -1;
1164         } else {
1165                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1166                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1167                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1168                         range_whole = 1;
1169                 cycled = 1; /* ignore range_cyclic tests */
1170         }
1171         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1172                 tag = PAGECACHE_TAG_TOWRITE;
1173         else
1174                 tag = PAGECACHE_TAG_DIRTY;
1175 retry:
1176         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1177                 tag_pages_for_writeback(mapping, index, end);
1178         done_index = index;
1179         while (!done && (index <= end)) {
1180                 int i;
1181
1182                 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1183                               min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
1184                 if (nr_pages == 0)
1185                         break;
1186
1187                 for (i = 0; i < nr_pages; i++) {
1188                         struct page *page = pvec.pages[i];
1189
1190                         if (page->index > end) {
1191                                 done = 1;
1192                                 break;
1193                         }
1194
1195                         done_index = page->index;
1196
1197                         lock_page(page);
1198
1199                         if (unlikely(page->mapping != mapping)) {
1200 continue_unlock:
1201                                 unlock_page(page);
1202                                 continue;
1203                         }
1204
1205                         if (!PageDirty(page)) {
1206                                 /* someone wrote it for us */
1207                                 goto continue_unlock;
1208                         }
1209
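                             /* two passes: a page whose is_cold_data() equals the current
                              * step is skipped here and written in the other pass */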
1210                         if (step == is_cold_data(page))
1211                                 goto continue_unlock;
1212
1213                         if (PageWriteback(page)) {
1214                                 if (wbc->sync_mode != WB_SYNC_NONE)
1215                                         f2fs_wait_on_page_writeback(page, DATA);
1216                                 else
1217                                         goto continue_unlock;
1218                         }
1219
1220                         BUG_ON(PageWriteback(page));
1221                         if (!clear_page_dirty_for_io(page))
1222                                 goto continue_unlock;
1223
1224                         ret = (*writepage)(page, wbc, data);
1225                         if (unlikely(ret)) {
1226                                 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1227                                         unlock_page(page);
1228                                         ret = 0;
1229                                 } else {
1230                                         done_index = page->index + 1;
1231                                         done = 1;
1232                                         break;
1233                                 }
1234                         }
1235
1236                         if (--wbc->nr_to_write <= 0 &&
1237                             wbc->sync_mode == WB_SYNC_NONE) {
1238                                 done = 1;
1239                                 break;
1240                         }
1241                 }
1242                 pagevec_release(&pvec);
1243                 cond_resched();
1244         }
1245
1246         if (step < 1) {
1247                 step++;
1248                 goto next;
1249         }
1250
1251         if (!cycled && !done) {
1252                 cycled = 1;
1253                 index = 0;
1254                 end = writeback_index - 1;
1255                 goto retry;
1256         }
1257         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1258                 mapping->writeback_index = done_index;
1259
1260         return ret;
1261 }
1262
1263 static int f2fs_write_data_pages(struct address_space *mapping,
1264                             struct writeback_control *wbc)
1265 {
1266         struct inode *inode = mapping->host;
1267         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1268         bool locked = false;
1269         int ret;
1270         long diff;
1271
1272         trace_f2fs_writepages(mapping->host, wbc, DATA);
1273
1274         /* deal with chardevs and other special files */
1275         if (!mapping->a_ops->writepage)
1276                 return 0;
1277
1278         /* skip writing if there is no dirty page in this inode */
1279         if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
1280                 return 0;
1281
1282         if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
1283                         get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
1284                         available_free_memory(sbi, DIRTY_DENTS))
1285                 goto skip_write;
1286
1287         /* during POR, we don't need to trigger writepage at all. */
1288         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1289                 goto skip_write;
1290
1291         diff = nr_pages_to_write(sbi, DATA, wbc);
1292
1293         if (!S_ISDIR(inode->i_mode)) {
1294                 mutex_lock(&sbi->writepages);
1295                 locked = true;
1296         }
1297         ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
1298         f2fs_submit_merged_bio(sbi, DATA, WRITE);
1299         if (locked)
1300                 mutex_unlock(&sbi->writepages);
1301
1302         remove_dirty_dir_inode(inode);
1303
1304         wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
1305         return ret;
1306
1307 skip_write:
1308         wbc->pages_skipped += get_dirty_pages(inode);
1309         return 0;
1310 }
1311
1312 static void f2fs_write_failed(struct address_space *mapping, loff_t to)
1313 {
1314         struct inode *inode = mapping->host;
1315
1316         if (to > inode->i_size) {
1317                 truncate_pagecache(inode, inode->i_size);
1318                 truncate_blocks(inode, inode->i_size, true);
1319         }
1320 }
1321
1322 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
1323                 loff_t pos, unsigned len, unsigned flags,
1324                 struct page **pagep, void **fsdata)
1325 {
1326         struct inode *inode = mapping->host;
1327         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1328         struct page *page = NULL;
1329         struct page *ipage;
1330         pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
1331         struct dnode_of_data dn;
1332         int err = 0;
1333
1334         trace_f2fs_write_begin(inode, pos, len, flags);
1335
1336         f2fs_balance_fs(sbi);
1337
1338         /*
1339          * We should check this here to avoid a deadlock between the inode page
1340          * and page #0. The locking rule for inline_data conversion should be:
1341          * lock_page(page #0) -> lock_page(inode_page)
1342          */
1343         if (index != 0) {
1344                 err = f2fs_convert_inline_inode(inode);
1345                 if (err)
1346                         goto fail;
1347         }
1348 repeat:
1349         page = grab_cache_page_write_begin(mapping, index, flags);
1350         if (!page) {
1351                 err = -ENOMEM;
1352                 goto fail;
1353         }
1354
1355         *pagep = page;
1356
1357         f2fs_lock_op(sbi);
1358
1359         /* check inline_data */
1360         ipage = get_node_page(sbi, inode->i_ino);
1361         if (IS_ERR(ipage)) {
1362                 err = PTR_ERR(ipage);
1363                 goto unlock_fail;
1364         }
1365
1366         set_new_dnode(&dn, inode, ipage, ipage, 0);
1367
1368         if (f2fs_has_inline_data(inode)) {
1369                 if (pos + len <= MAX_INLINE_DATA) {
1370                         read_inline_data(page, ipage);
1371                         set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
1372                         sync_inode_page(&dn);
1373                         goto put_next;
1374                 }
1375                 err = f2fs_convert_inline_page(&dn, page);
1376                 if (err)
1377                         goto put_fail;
1378         }
1379         err = f2fs_reserve_block(&dn, index);
1380         if (err)
1381                 goto put_fail;
1382 put_next:
1383         f2fs_put_dnode(&dn);
1384         f2fs_unlock_op(sbi);
1385
1386         if (len == PAGE_CACHE_SIZE)
1387                 goto out_update;
1388         if (PageUptodate(page))
1389                 goto out_clear;
1390
1391         f2fs_wait_on_page_writeback(page, DATA);
1392
1393         if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
1394                 unsigned start = pos & (PAGE_CACHE_SIZE - 1);
1395                 unsigned end = start + len;
1396
1397                 /* Reading beyond i_size is simple: memset to zero */
1398                 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
1399                 goto out_update;
1400         }
1401
1402         if (dn.data_blkaddr == NEW_ADDR) {
1403                 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
1404         } else {
1405                 struct f2fs_io_info fio = {
1406                         .sbi = sbi,
1407                         .type = DATA,
1408                         .rw = READ_SYNC,
1409                         .blk_addr = dn.data_blkaddr,
1410                         .page = page,
1411                         .encrypted_page = NULL,
1412                 };
1413                 err = f2fs_submit_page_bio(&fio);
1414                 if (err)
1415                         goto fail;
1416
1417                 lock_page(page);
1418                 if (unlikely(!PageUptodate(page))) {
1419                         err = -EIO;
1420                         goto fail;
1421                 }
1422                 if (unlikely(page->mapping != mapping)) {
1423                         f2fs_put_page(page, 1);
1424                         goto repeat;
1425                 }
1426
1427                 /* avoid symlink page */
1428                 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1429                         err = f2fs_decrypt_one(inode, page);
1430                         if (err)
1431                                 goto fail;
1432                 }
1433         }
1434 out_update:
1435         SetPageUptodate(page);
1436 out_clear:
1437         clear_cold_data(page);
1438         return 0;
1439
1440 put_fail:
1441         f2fs_put_dnode(&dn);
1442 unlock_fail:
1443         f2fs_unlock_op(sbi);
1444 fail:
1445         f2fs_put_page(page, 1);
1446         f2fs_write_failed(mapping, pos + len);
1447         return err;
1448 }
1449
1450 static int f2fs_write_end(struct file *file,
1451                         struct address_space *mapping,
1452                         loff_t pos, unsigned len, unsigned copied,
1453                         struct page *page, void *fsdata)
1454 {
1455         struct inode *inode = page->mapping->host;
1456
1457         trace_f2fs_write_end(inode, pos, len, copied);
1458
1459         set_page_dirty(page);
1460
1461         if (pos + copied > i_size_read(inode)) {
1462                 i_size_write(inode, pos + copied);
1463                 mark_inode_dirty(inode);
1464                 update_inode_page(inode);
1465         }
1466
1467         f2fs_put_page(page, 1);
1468         return copied;
1469 }
1470
1471 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
1472                            loff_t offset)
1473 {
1474         unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
1475
1476         if (iov_iter_rw(iter) == READ)
1477                 return 0;
1478
1479         if (offset & blocksize_mask)
1480                 return -EINVAL;
1481
1482         if (iov_iter_alignment(iter) & blocksize_mask)
1483                 return -EINVAL;
1484
1485         return 0;
1486 }
1487
1488 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
1489                               loff_t offset)
1490 {
1491         struct file *file = iocb->ki_filp;
1492         struct address_space *mapping = file->f_mapping;
1493         struct inode *inode = mapping->host;
1494         size_t count = iov_iter_count(iter);
1495         int err;
1496
1497         /* we don't need to use inline_data strictly */
1498         if (f2fs_has_inline_data(inode)) {
1499                 err = f2fs_convert_inline_inode(inode);
1500                 if (err)
1501                         return err;
1502         }
1503
1504         if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
1505                 return 0;
1506
1507         if (check_direct_IO(inode, iter, offset))
1508                 return 0;
1509
1510         trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
1511
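             /* for direct writes, preallocate block addresses for the whole range up front */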
1512         if (iov_iter_rw(iter) == WRITE)
1513                 __allocate_data_blocks(inode, offset, count);
1514
1515         err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
1516         if (err < 0 && iov_iter_rw(iter) == WRITE)
1517                 f2fs_write_failed(mapping, offset + count);
1518
1519         trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
1520
1521         return err;
1522 }
1523
1524 void f2fs_invalidate_page(struct page *page, unsigned int offset,
1525                                                         unsigned int length)
1526 {
1527         struct inode *inode = page->mapping->host;
1528         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1529
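             /* a partial-page invalidation of a non-meta/node inode changes no counters */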
1530         if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
1531                 (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
1532                 return;
1533
1534         if (PageDirty(page)) {
1535                 if (inode->i_ino == F2FS_META_INO(sbi))
1536                         dec_page_count(sbi, F2FS_DIRTY_META);
1537                 else if (inode->i_ino == F2FS_NODE_INO(sbi))
1538                         dec_page_count(sbi, F2FS_DIRTY_NODES);
1539                 else
1540                         inode_dec_dirty_pages(inode);
1541         }
1542         ClearPagePrivate(page);
1543 }
1544
1545 int f2fs_release_page(struct page *page, gfp_t wait)
1546 {
1547         /* If this is a dirty page, keep PagePrivate */
1548         if (PageDirty(page))
1549                 return 0;
1550
1551         ClearPagePrivate(page);
1552         return 1;
1553 }
1554
1555 static int f2fs_set_data_page_dirty(struct page *page)
1556 {
1557         struct address_space *mapping = page->mapping;
1558         struct inode *inode = mapping->host;
1559
1560         trace_f2fs_set_page_dirty(page, DATA);
1561
1562         SetPageUptodate(page);
1563
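             /* pages of an atomic-write file are staged in memory and committed later */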
1564         if (f2fs_is_atomic_file(inode)) {
1565                 register_inmem_page(inode, page);
1566                 return 1;
1567         }
1568
1569         if (!PageDirty(page)) {
1570                 __set_page_dirty_nobuffers(page);
1571                 update_dirty_page(inode, page);
1572                 return 1;
1573         }
1574         return 0;
1575 }
1576
1577 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
1578 {
1579         struct inode *inode = mapping->host;
1580
1581         /* we don't need to use inline_data strictly */
1582         if (f2fs_has_inline_data(inode)) {
1583                 int err = f2fs_convert_inline_inode(inode);
1584                 if (err)
1585                         return err;
1586         }
1587         return generic_block_bmap(mapping, block, get_data_block);
1588 }
1589
1590 const struct address_space_operations f2fs_dblock_aops = {
1591         .readpage       = f2fs_read_data_page,
1592         .readpages      = f2fs_read_data_pages,
1593         .writepage      = f2fs_write_data_page,
1594         .writepages     = f2fs_write_data_pages,
1595         .write_begin    = f2fs_write_begin,
1596         .write_end      = f2fs_write_end,
1597         .set_page_dirty = f2fs_set_data_page_dirty,
1598         .invalidatepage = f2fs_invalidate_page,
1599         .releasepage    = f2fs_release_page,
1600         .direct_IO      = f2fs_direct_IO,
1601         .bmap           = f2fs_bmap,
1602 };