/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x).
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;
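
/*
 * Return true only if replaying all blocks written since the last
 * checkpoint still fits within the total number of user blocks.
 */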
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}
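
/* Look up the candidate list for an entry matching the given inode number. */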
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;
	return NULL;
}
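
/*
 * Re-link a recovered inode into its parent directory: drop any stale
 * entry that still points at a different inode, then add the link again.
 */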
static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
		clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
		goto out_unmap_put;
	}
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}
	goto out;

out_unmap_put:
	kunmap(page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
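
/*
 * Copy the basic metadata (mode, size, timestamps) from the newest
 * on-disk inode block back into the in-memory inode.
 */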
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), F2FS_INODE(page)->i_name);
}
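
/*
 * Step #1 of recovery: walk the node chain that starts right after the
 * last checkpointed warm node block and collect every fsync-marked dnode
 * into the candidate list.
 */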
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
			return 0;

		page = get_meta_page_ra(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT)
					goto next;
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	f2fs_put_page(page, 1);
	return err;
}
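
/* Drop every remaining candidate entry and release its inode reference. */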
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}
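
/*
 * A block being recovered to @blkaddr may still be indexed by a node
 * written before the crash. Find that owner through the segment summary
 * and truncate the stale index so the block can be reused safely.
 */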
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
					block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;

		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;

		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	if (ino != dn->inode->i_ino) {
		truncate_hole(inode, bidx, bidx + 1);
		iput(inode);
	} else {
		struct dnode_of_data tdn;

		set_new_dnode(&tdn, inode, dn->inode_page, NULL, 0);
		if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
			return 0;
		if (tdn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&tdn, 1);
		f2fs_put_page(tdn.node_page, 1);
	}
	return 0;
}
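
/*
 * Replay one fsynced node page: restore xattr and inline data first, then
 * rewrite every data block whose address differs from the checkpointed
 * copy, and finally bring the on-disk node page itself up to date.
 */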
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip indices that are unchanged or were never written */
		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}
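
/*
 * Step #2 of recovery: walk the warm node chain again and, for each block
 * that belongs to a collected inode, replay the inode, dentry and data
 * updates in write order.
 */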
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
			break;

		page = get_meta_page_ra(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (entry->last_inode == blkaddr)
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}
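
/*
 * Mount-time entry point: collect the fsynced dnodes written after the
 * last checkpoint, replay them, then write a new checkpoint on success
 * or flush the dirty meta and flag the error on failure.
 */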
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	block_t blkaddr;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = true;

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	sbi->por_doing = false;
	if (err) {
		discard_next_dnode(sbi, blkaddr);

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);
		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_SYNC,
		};
		mutex_unlock(&sbi->cp_mutex);
		write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}
	return err;
}