/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *ino_entry_slab;
static struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = META_MAPPING(sbi);
        struct page *page = NULL;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        f2fs_wait_on_page_writeback(page, META);
        SetPageUptodate(page);
        return page;
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = META_MAPPING(sbi);
        struct page *page;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        if (PageUptodate(page))
                goto out;

        if (f2fs_submit_page_bio(sbi, page, index,
                                READ_SYNC | REQ_META | REQ_PRIO))
                goto repeat;

        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
out:
        return page;
}

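/*
 * Upper bound used by readahead for each meta area: the number of
 * NAT/SIT blocks, or 0 for SSA/CP, where the caller bounds the range
 * itself.
 */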
static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
{
        switch (type) {
        case META_NAT:
                return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
        case META_SIT:
                return SIT_BLK_CNT(sbi);
        case META_SSA:
        case META_CP:
                return 0;
        default:
                BUG();
        }
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
{
        block_t prev_blk_addr = 0;
        struct page *page;
        int blkno = start;
        int max_blks = get_max_meta_blks(sbi, type);

        struct f2fs_io_info fio = {
                .type = META,
                .rw = READ_SYNC | REQ_META | REQ_PRIO
        };

        for (; nrpages-- > 0; blkno++) {
                block_t blk_addr;

                switch (type) {
                case META_NAT:
                        /* get nat block addr */
                        if (unlikely(blkno >= max_blks))
                                blkno = 0;
                        blk_addr = current_nat_addr(sbi,
                                        blkno * NAT_ENTRY_PER_BLOCK);
                        break;
                case META_SIT:
                        /* get sit block addr */
                        if (unlikely(blkno >= max_blks))
                                goto out;
                        blk_addr = current_sit_addr(sbi,
                                        blkno * SIT_ENTRY_PER_BLOCK);
                        if (blkno != start && prev_blk_addr + 1 != blk_addr)
                                goto out;
                        prev_blk_addr = blk_addr;
                        break;
                case META_SSA:
                case META_CP:
                        /* get ssa/cp block addr */
                        blk_addr = blkno;
                        break;
                default:
                        BUG();
                }

                page = grab_cache_page(META_MAPPING(sbi), blk_addr);
                if (!page)
                        continue;
                if (PageUptodate(page)) {
                        f2fs_put_page(page, 1);
                        continue;
                }

                f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
                f2fs_put_page(page, 0);
        }
out:
        f2fs_submit_merged_bio(sbi, META, READ);
        return blkno - start;
}

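/*
 * Write back a single dirty meta page.  The page is redirtied instead
 * during recovery, for reclaim-driven writeback, and after a checkpoint
 * error.
 */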
static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);

        trace_f2fs_writepage(page, META);

        if (unlikely(sbi->por_doing))
                goto redirty_out;
        if (wbc->for_reclaim)
                goto redirty_out;
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;

        f2fs_wait_on_page_writeback(page, META);
        write_meta_page(sbi, page);
        dec_page_count(sbi, F2FS_DIRTY_META);
        unlock_page(page);
        return 0;

redirty_out:
        redirty_page_for_writepage(wbc, page);
        return AOP_WRITEPAGE_ACTIVATE;
}

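/*
 * Write back dirty meta pages in batches; small sets of dirty pages are
 * skipped so they can be merged into a later, larger writeback pass.
 */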
static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff, written;

        trace_f2fs_writepages(mapping->host, wbc, META);

        /* collect a number of dirty meta pages and write together */
        if (wbc->for_kupdate ||
                get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
                goto skip_write;

        /* hold cp_mutex so we do not race with a running checkpoint */
        mutex_lock(&sbi->cp_mutex);
        diff = nr_pages_to_write(sbi, META, wbc);
        written = sync_meta_pages(sbi, META, wbc->nr_to_write);
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
        return 0;

skip_write:
        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
        return 0;
}

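/*
 * Walk the meta mapping, write up to nr_to_write dirty pages, and submit
 * the merged WRITE bio.  Returns the number of pages written.  A typical
 * caller flushes everything, as do_checkpoint() does:
 *
 *	sync_meta_pages(sbi, META, LONG_MAX);
 */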
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                                                long nr_to_write)
{
        struct address_space *mapping = META_MAPPING(sbi);
        pgoff_t index = 0, end = LONG_MAX;
        struct pagevec pvec;
        long nwritten = 0;
        struct writeback_control wbc = {
                .for_reclaim = 0,
        };

        pagevec_init(&pvec, 0);

        while (index <= end) {
                int i, nr_pages;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (unlikely(nr_pages == 0))
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
continue_unlock:
                                unlock_page(page);
                                continue;
                        }
                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }

                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

                        if (f2fs_write_meta_page(page, &wbc)) {
                                unlock_page(page);
                                break;
                        }
                        nwritten++;
                        if (unlikely(nwritten >= nr_to_write))
                                break;
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (nwritten)
                f2fs_submit_merged_bio(sbi, type, WRITE);

        return nwritten;
}

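/*
 * Mark a meta page dirty and account it; returns 1 only on a
 * clean-to-dirty transition.
 */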
static int f2fs_set_meta_page_dirty(struct page *page)
{
        trace_f2fs_set_page_dirty(page, META);

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
                return 1;
        }
        return 0;
}

const struct address_space_operations f2fs_meta_aops = {
        .writepage      = f2fs_write_meta_page,
        .writepages     = f2fs_write_meta_pages,
        .set_page_dirty = f2fs_set_meta_page_dirty,
};

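/*
 * Insert @ino into the per-type radix tree and list, retrying until both
 * the GFP_ATOMIC slab allocation and the radix tree insertion succeed.
 */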
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        struct ino_entry *e;
retry:
        spin_lock(&sbi->ino_lock[type]);

        e = radix_tree_lookup(&sbi->ino_root[type], ino);
        if (!e) {
                e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
                if (!e) {
                        spin_unlock(&sbi->ino_lock[type]);
                        goto retry;
                }
                if (radix_tree_insert(&sbi->ino_root[type], ino, e)) {
                        spin_unlock(&sbi->ino_lock[type]);
                        kmem_cache_free(ino_entry_slab, e);
                        goto retry;
                }
                memset(e, 0, sizeof(struct ino_entry));
                e->ino = ino;

                list_add_tail(&e->list, &sbi->ino_list[type]);
        }
        spin_unlock(&sbi->ino_lock[type]);
}

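/*
 * Drop @ino from the per-type radix tree and list; for ORPHAN_INO this
 * also gives back the slot reserved by acquire_orphan_inode().
 */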
static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        struct ino_entry *e;

        spin_lock(&sbi->ino_lock[type]);
        e = radix_tree_lookup(&sbi->ino_root[type], ino);
        if (e) {
                list_del(&e->list);
                radix_tree_delete(&sbi->ino_root[type], ino);
                if (type == ORPHAN_INO)
                        sbi->n_orphans--;
                spin_unlock(&sbi->ino_lock[type]);
                kmem_cache_free(ino_entry_slab, e);
                return;
        }
        spin_unlock(&sbi->ino_lock[type]);
}

void add_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        /* add new dirty ino entry into list */
        __add_ino_entry(sbi, ino, type);
}

void remove_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        /* remove dirty ino entry from list */
        __remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO or UPDATE_INO */
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
        struct ino_entry *e;
        spin_lock(&sbi->ino_lock[mode]);
        e = radix_tree_lookup(&sbi->ino_root[mode], ino);
        spin_unlock(&sbi->ino_lock[mode]);
        return e ? true : false;
}

void release_dirty_inode(struct f2fs_sb_info *sbi)
{
        struct ino_entry *e, *tmp;
        int i;

        for (i = APPEND_INO; i <= UPDATE_INO; i++) {
                spin_lock(&sbi->ino_lock[i]);
                list_for_each_entry_safe(e, tmp, &sbi->ino_list[i], list) {
                        list_del(&e->list);
                        radix_tree_delete(&sbi->ino_root[i], e->ino);
                        kmem_cache_free(ino_entry_slab, e);
                }
                spin_unlock(&sbi->ino_lock[i]);
        }
}

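/*
 * Reserve an orphan slot before the entry is actually added; fails with
 * -ENOSPC once max_orphans is reached.  The expected calling pattern
 * (a sketch, using the helpers below) is:
 *
 *	err = acquire_orphan_inode(sbi);
 *	if (!err)
 *		add_orphan_inode(sbi, ino);
 *	(call release_orphan_inode() instead if the operation is aborted)
 */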
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
        int err = 0;

        spin_lock(&sbi->ino_lock[ORPHAN_INO]);
        if (unlikely(sbi->n_orphans >= sbi->max_orphans))
                err = -ENOSPC;
        else
                sbi->n_orphans++;
        spin_unlock(&sbi->ino_lock[ORPHAN_INO]);

        return err;
}

void release_orphan_inode(struct f2fs_sb_info *sbi)
{
        spin_lock(&sbi->ino_lock[ORPHAN_INO]);
        f2fs_bug_on(sbi->n_orphans == 0);
        sbi->n_orphans--;
        spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
}

void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        /* add new orphan ino entry into list */
        __add_ino_entry(sbi, ino, ORPHAN_INO);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        /* remove orphan entry from orphan list */
        __remove_ino_entry(sbi, ino, ORPHAN_INO);
}

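/*
 * Drop the last link of an orphan inode; the final iput() truncates and
 * frees its data.
 */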
static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct inode *inode = f2fs_iget(sbi->sb, ino);
        f2fs_bug_on(IS_ERR(inode));
        clear_nlink(inode);

        /* truncate all the data during iput */
        iput(inode);
}

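/*
 * During mount, replay the orphan list recorded in the current CP pack so
 * that inodes deleted while still open get reclaimed.
 */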
void recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
        block_t start_blk, orphan_blkaddr, i, j;

        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
                return;

        sbi->por_doing = true;

        start_blk = __start_cp_addr(sbi) + 1 +
                le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
        orphan_blkaddr = __start_sum_addr(sbi) - 1;

        ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);

        for (i = 0; i < orphan_blkaddr; i++) {
                struct page *page = get_meta_page(sbi, start_blk + i);
                struct f2fs_orphan_block *orphan_blk;

                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
                        nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
                        recover_orphan_inode(sbi, ino);
                }
                f2fs_put_page(page, 1);
        }
        /* clear the orphan flag */
        clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
        sbi->por_doing = false;
        return;
}

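/*
 * Pack the in-memory orphan list into f2fs_orphan_block pages starting at
 * @start_blk.  The meta pages are grabbed up front so that no allocation
 * happens under the ino lock.
 */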
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
        struct list_head *head;
        struct f2fs_orphan_block *orphan_blk = NULL;
        unsigned int nentries = 0;
        unsigned short index;
        unsigned short orphan_blocks =
                        (unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans);
        struct page *page = NULL;
        struct ino_entry *orphan = NULL;

        for (index = 0; index < orphan_blocks; index++)
                grab_meta_page(sbi, start_blk + index);

        index = 1;
        spin_lock(&sbi->ino_lock[ORPHAN_INO]);
        head = &sbi->ino_list[ORPHAN_INO];

        /* loop over each orphan inode entry and write it into a journal block */
        list_for_each_entry(orphan, head, list) {
                if (!page) {
                        page = find_get_page(META_MAPPING(sbi), start_blk++);
                        f2fs_bug_on(!page);
                        orphan_blk =
                                (struct f2fs_orphan_block *)page_address(page);
                        memset(orphan_blk, 0, sizeof(*orphan_blk));
                        f2fs_put_page(page, 0);
                }

                orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

                if (nentries == F2FS_ORPHANS_PER_BLOCK) {
                        /*
                         * when an orphan block is full of 1020 entries,
                         * we need to flush the current orphan block and
                         * bring another one into memory
                         */
                        orphan_blk->blk_addr = cpu_to_le16(index);
                        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                        orphan_blk->entry_count = cpu_to_le32(nentries);
                        set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        index++;
                        nentries = 0;
                        page = NULL;
                }
        }

        if (page) {
                orphan_blk->blk_addr = cpu_to_le16(index);
                orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                orphan_blk->entry_count = cpu_to_le32(nentries);
                set_page_dirty(page);
                f2fs_put_page(page, 1);
        }

        spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
}

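/*
 * Read the first and last checkpoint blocks of one CP pack, verify their
 * CRCs, and return the first page only if the two version numbers match;
 * otherwise the pack is torn and NULL is returned.
 */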
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                                block_t cp_addr, unsigned long long *version)
{
        struct page *cp_page_1, *cp_page_2 = NULL;
        unsigned long blk_size = sbi->blocksize;
        struct f2fs_checkpoint *cp_block;
        unsigned long long cur_version = 0, pre_version = 0;
        size_t crc_offset;
        __u32 crc = 0;

        /* Read the 1st cp block in this CP pack */
        cp_page_1 = get_meta_page(sbi, cp_addr);

        /* get the version number */
        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp1;

        crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp1;

        pre_version = cur_cp_version(cp_block);

        /* Read the 2nd cp block in this CP pack */
        cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
        cp_page_2 = get_meta_page(sbi, cp_addr);

        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp2;

        crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp2;

        cur_version = cur_cp_version(cp_block);

        if (cur_version == pre_version) {
                *version = cur_version;
                f2fs_put_page(cp_page_2, 1);
                return cp_page_1;
        }
invalid_cp2:
        f2fs_put_page(cp_page_2, 1);
invalid_cp1:
        f2fs_put_page(cp_page_1, 1);
        return NULL;
}

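/*
 * Validate both CP packs, pick the one with the newer version, and cache
 * its checkpoint block plus any cp_payload blocks in sbi->ckpt.
 */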
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
        struct f2fs_checkpoint *cp_block;
        struct f2fs_super_block *fsb = sbi->raw_super;
        struct page *cp1, *cp2, *cur_page;
        unsigned long blk_size = sbi->blocksize;
        unsigned long long cp1_version = 0, cp2_version = 0;
        unsigned long long cp_start_blk_no;
        unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
        block_t cp_blk_no;
        int i;

        sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
        if (!sbi->ckpt)
                return -ENOMEM;
        /*
         * Finding the valid cp block involves reading both
         * sets (cp pack 1 and cp pack 2)
         */
        cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

        /* The second checkpoint pack should start at the next segment */
        cp_start_blk_no += ((unsigned long long)1) <<
                                le32_to_cpu(fsb->log_blocks_per_seg);
        cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

        if (cp1 && cp2) {
                if (ver_after(cp2_version, cp1_version))
                        cur_page = cp2;
                else
                        cur_page = cp1;
        } else if (cp1) {
                cur_page = cp1;
        } else if (cp2) {
                cur_page = cp2;
        } else {
                goto fail_no_cp;
        }

        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);

        if (cp_blks <= 1)
                goto done;

        cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        if (cur_page == cp2)
                cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

        for (i = 1; i < cp_blks; i++) {
                void *sit_bitmap_ptr;
                unsigned char *ckpt = (unsigned char *)sbi->ckpt;

                cur_page = get_meta_page(sbi, cp_blk_no + i);
                sit_bitmap_ptr = page_address(cur_page);
                memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
                f2fs_put_page(cur_page, 1);
        }
done:
        f2fs_put_page(cp1, 1);
        f2fs_put_page(cp2, 1);
        return 0;

fail_no_cp:
        kfree(sbi->ckpt);
        return -EINVAL;
}

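/*
 * Link a directory inode into the global dirty-dir list exactly once;
 * returns -EEXIST (leaving @new unused) when it is already tracked.
 */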
static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR))
                return -EEXIST;

        set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
        F2FS_I(inode)->dirty_dir = new;
        list_add_tail(&new->list, &sbi->dir_inode_list);
        stat_inc_dirty_dir(sbi);
        return 0;
}

void set_dirty_dir_page(struct inode *inode, struct page *page)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dir_inode_entry *new;
        int ret = 0;

        if (!S_ISDIR(inode->i_mode))
                return;

        new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        ret = __add_dirty_inode(inode, new);
        inode_inc_dirty_dents(inode);
        SetPagePrivate(page);
        spin_unlock(&sbi->dir_inode_lock);

        if (ret)
                kmem_cache_free(inode_entry_slab, new);
}

void add_dirty_dir_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dir_inode_entry *new =
                        f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        int ret = 0;

        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        ret = __add_dirty_inode(inode, new);
        spin_unlock(&sbi->dir_inode_lock);

        if (ret)
                kmem_cache_free(inode_entry_slab, new);
}

void remove_dirty_dir_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dir_inode_entry *entry;

        if (!S_ISDIR(inode->i_mode))
                return;

        spin_lock(&sbi->dir_inode_lock);
        if (get_dirty_dents(inode) ||
                        !is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }

        entry = F2FS_I(inode)->dirty_dir;
        list_del(&entry->list);
        F2FS_I(inode)->dirty_dir = NULL;
        clear_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
        stat_dec_dirty_dir(sbi);
        spin_unlock(&sbi->dir_inode_lock);
        kmem_cache_free(inode_entry_slab, entry);

        /* Only from the recovery routine */
        if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
                clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
                iput(inode);
        }
}

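/*
 * Flush dentry pages of every dirty directory until the list drains.
 * igrab() can fail for an inode that is being freed; submitting the
 * merged DATA bio still makes forward progress in that case.
 */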
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
        struct list_head *head;
        struct dir_inode_entry *entry;
        struct inode *inode;
retry:
        spin_lock(&sbi->dir_inode_lock);

        head = &sbi->dir_inode_list;
        if (list_empty(head)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }
        entry = list_entry(head->next, struct dir_inode_entry, list);
        inode = igrab(entry->inode);
        spin_unlock(&sbi->dir_inode_lock);
        if (inode) {
                filemap_fdatawrite(inode->i_mapping);
                iput(inode);
        } else {
                /*
                 * We should submit the bio, since several dentry pages
                 * of the freeing inode may still be under writeback.
                 */
                f2fs_submit_merged_bio(sbi, DATA, WRITE);
        }
        goto retry;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };
        struct blk_plug plug;
        int err = 0;

        blk_start_plug(&plug);

retry_flush_dents:
        f2fs_lock_all(sbi);
        /* write all the dirty dentry pages */
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                f2fs_unlock_all(sbi);
                sync_dirty_dir_inodes(sbi);
                if (unlikely(f2fs_cp_error(sbi))) {
                        err = -EIO;
                        goto out;
                }
                goto retry_flush_dents;
        }

        /*
         * POR: we should ensure that there are no dirty node pages
         * until finishing nat/sit flush.
         */
retry_flush_nodes:
        down_write(&sbi->node_write);

        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                up_write(&sbi->node_write);
                sync_node_pages(sbi, 0, &wbc);
                if (unlikely(f2fs_cp_error(sbi))) {
                        f2fs_unlock_all(sbi);
                        err = -EIO;
                        goto out;
                }
                goto retry_flush_nodes;
        }
out:
        blk_finish_plug(&plug);
        return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
        up_write(&sbi->node_write);
        f2fs_unlock_all(sbi);
}

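/*
 * Sleep on sbi->cp_wait until no F2FS pages remain under writeback,
 * rechecking the F2FS_WRITEBACK count each time we are woken.
 */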
static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

                if (!get_pages(sbi, F2FS_WRITEBACK))
                        break;

                io_schedule();
        }
        finish_wait(&sbi->cp_wait, &wait);
}

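/*
 * Write one CP pack: the checkpoint block, cp_payload blocks, orphan
 * blocks, data summaries (plus node summaries on umount), and a trailing
 * copy of the checkpoint block that commits the whole pack.
 */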
static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        nid_t last_nid = 0;
        block_t start_blk;
        struct page *cp_page;
        unsigned int data_sum_blocks, orphan_blocks;
        __u32 crc32 = 0;
        void *kaddr;
        int i;
        int cp_payload_blks = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);

        /*
         * This avoids conducting wrong roll-forward operations and uses
         * meta pages, so it should be called prior to sync_meta_pages below.
         */
        discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));

        /* Flush all the NAT/SIT pages */
        while (get_pages(sbi, F2FS_DIRTY_META)) {
                sync_meta_pages(sbi, META, LONG_MAX);
                if (unlikely(f2fs_cp_error(sbi)))
                        return;
        }

        next_free_nid(sbi, &last_nid);

        /*
         * modify checkpoint
         * version number is already updated
         */
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
        ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
        for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
        for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
                ckpt->alloc_type[i + CURSEG_HOT_DATA] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
        }

        ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
        ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
        ckpt->next_free_nid = cpu_to_le32(last_nid);

        /* 2 cp + n data seg summary + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi);
        if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
                set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

        orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans);
        ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
                        orphan_blocks);

        if (is_umount) {
                set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks + NR_CURSEG_NODE_TYPE);
        } else {
                clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks);
        }

        if (sbi->n_orphans)
                set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

        if (sbi->need_fsck)
                set_ckpt_flags(ckpt, CP_FSCK_FLAG);

        /* update SIT/NAT bitmap */
        get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

        crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
        *((__le32 *)((unsigned char *)ckpt +
                                le32_to_cpu(ckpt->checksum_offset)))
                                = cpu_to_le32(crc32);

        start_blk = __start_cp_addr(sbi);

        /* write out checkpoint buffer at block 0 */
        cp_page = grab_meta_page(sbi, start_blk++);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
        set_page_dirty(cp_page);
        f2fs_put_page(cp_page, 1);

        for (i = 1; i < 1 + cp_payload_blks; i++) {
                cp_page = grab_meta_page(sbi, start_blk++);
                kaddr = page_address(cp_page);
                memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE,
                                (1 << sbi->log_blocksize));
                set_page_dirty(cp_page);
                f2fs_put_page(cp_page, 1);
        }

        if (sbi->n_orphans) {
                write_orphan_inodes(sbi, start_blk);
                start_blk += orphan_blocks;
        }

        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
        if (is_umount) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
        }

        /* writeout checkpoint block */
        cp_page = grab_meta_page(sbi, start_blk);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
        set_page_dirty(cp_page);
        f2fs_put_page(cp_page, 1);

        /* wait for previously submitted node/meta pages' writeback */
        wait_on_all_pages_writeback(sbi);

        if (unlikely(f2fs_cp_error(sbi)))
                return;

        filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
        filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);

        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;

        /* Here, we have only one bio carrying the CP pack */
        sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

        release_dirty_inode(sbi);

        if (unlikely(f2fs_cp_error(sbi)))
                return;

        clear_prefree_segments(sbi);
        F2FS_RESET_SB_DIRT(sbi);
}

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;

        trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");

        mutex_lock(&sbi->cp_mutex);

        if (!sbi->s_dirty)
                goto out;
        if (unlikely(f2fs_cp_error(sbi)))
                goto out;
        if (block_operations(sbi))
                goto out;

        trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");

        f2fs_submit_merged_bio(sbi, DATA, WRITE);
        f2fs_submit_merged_bio(sbi, NODE, WRITE);
        f2fs_submit_merged_bio(sbi, META, WRITE);

        /*
         * Update the checkpoint pack index: increase the version number
         * so that SIT entries and seg summaries are written to the
         * correct place.
         */
        ckpt_ver = cur_cp_version(ckpt);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

        /* write cached NAT/SIT entries to NAT/SIT area */
        flush_nat_entries(sbi);
        flush_sit_entries(sbi);

        /* write out the CP pack; locks are released by unblock_operations() */
        do_checkpoint(sbi, is_umount);

        unblock_operations(sbi);
        stat_inc_cp_count(sbi->stat_info);
out:
        mutex_unlock(&sbi->cp_mutex);
        trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
}

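/*
 * Initialize the per-type ino radix trees and lists, and derive the
 * orphan limit from the blocks left in a segment after the CP pack and
 * current segment summary blocks.
 */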
void init_ino_entry_info(struct f2fs_sb_info *sbi)
{
        int i;

        for (i = 0; i < MAX_INO_ENTRY; i++) {
                INIT_RADIX_TREE(&sbi->ino_root[i], GFP_ATOMIC);
                spin_lock_init(&sbi->ino_lock[i]);
                INIT_LIST_HEAD(&sbi->ino_list[i]);
        }

        /*
         * Considering 512 blocks in a segment, 8 blocks are needed for cp
         * and log segment summaries.  The remaining blocks are used to
         * keep orphan entries; with one reserved segment for the cp pack
         * we can have at most 1020 * 504 orphan entries.
         */
        sbi->n_orphans = 0;
        sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
                        NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
{
        ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
                        sizeof(struct ino_entry));
        if (!ino_entry_slab)
                return -ENOMEM;
        inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
                        sizeof(struct dir_inode_entry));
        if (!inode_entry_slab) {
                kmem_cache_destroy(ino_entry_slab);
                return -ENOMEM;
        }
        return 0;
}

void destroy_checkpoint_caches(void)
{
        kmem_cache_destroy(ino_entry_slab);
        kmem_cache_destroy(inode_entry_slab);
}