/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>
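/*
 * f2fs stores its validity bitmaps with the bit order reversed inside each
 * byte (see f2fs_set_bit in f2fs.h), so the generic ffs/ffz helpers cannot
 * be used directly. __reverse_ffz(x) finds the first zero bit in that
 * reversed layout by running __reverse_ffs on the complemented word.
 */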
25 #define __reverse_ffz(x) __reverse_ffs(~(x))
27 static struct kmem_cache *discard_entry_slab;
/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
33 static inline unsigned long __reverse_ffs(unsigned long word)
37 #if BITS_PER_LONG == 64
38 if ((word & 0xffffffff) == 0) {
43 if ((word & 0xffff) == 0) {
47 if ((word & 0xff) == 0) {
51 if ((word & 0xf0) == 0)
55 if ((word & 0xc) == 0)
59 if ((word & 0x2) == 0)
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *                             LSB <--> MSB
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
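/*
 * Illustrative example of the helpers below: after f2fs_set_bit(5, map) on
 * an otherwise clear 8-bit map, __find_rev_next_bit(map, 8, 0) returns 5 and
 * __find_rev_next_zero_bit(map, 8, 5) returns 6, i.e. both functions speak
 * the same reversed bit numbering that f2fs_set_bit uses.
 */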
72 static unsigned long __find_rev_next_bit(const unsigned long *addr,
73 unsigned long size, unsigned long offset)
75 const unsigned long *p = addr + BIT_WORD(offset);
76 unsigned long result = offset & ~(BITS_PER_LONG - 1);
78 unsigned long mask, submask;
79 unsigned long quot, rest;
85 offset %= BITS_PER_LONG;
90 quot = (offset >> 3) << 3;
93 submask = (unsigned char)(0xff << rest) >> rest;
97 if (size < BITS_PER_LONG)
102 size -= BITS_PER_LONG;
103 result += BITS_PER_LONG;
105 while (size & ~(BITS_PER_LONG-1)) {
109 result += BITS_PER_LONG;
110 size -= BITS_PER_LONG;
116 tmp &= (~0UL >> (BITS_PER_LONG - size));
117 if (tmp == 0UL) /* Are any bits set? */
118 return result + size; /* Nope. */
120 return result + __reverse_ffs(tmp);
123 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
124 unsigned long size, unsigned long offset)
126 const unsigned long *p = addr + BIT_WORD(offset);
127 unsigned long result = offset & ~(BITS_PER_LONG - 1);
129 unsigned long mask, submask;
130 unsigned long quot, rest;
136 offset %= BITS_PER_LONG;
141 quot = (offset >> 3) << 3;
143 mask = ~(~0UL << quot);
144 submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
148 if (size < BITS_PER_LONG)
153 size -= BITS_PER_LONG;
154 result += BITS_PER_LONG;
156 while (size & ~(BITS_PER_LONG - 1)) {
160 result += BITS_PER_LONG;
161 size -= BITS_PER_LONG;
169 if (tmp == ~0UL) /* Are any bits zero? */
170 return result + size; /* Nope. */
172 return result + __reverse_ffz(tmp);
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
179 void f2fs_balance_fs(struct f2fs_sb_info *sbi)
	/*
	 * We should do GC, or end up writing a checkpoint, if there are too
	 * many dirty dir/node pages without enough free segments.
	 */
185 if (has_not_enough_free_secs(sbi, 0)) {
186 mutex_lock(&sbi->gc_mutex);
191 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
193 /* check the # of cached NAT entries and prefree segments */
194 if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
195 excess_prefree_segs(sbi))
196 f2fs_sync_fs(sbi->sb, true);
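/*
 * Flush merging: when many threads fsync concurrently, each would otherwise
 * send its own cache-flush command to the device. With FLUSH_MERGE, callers
 * queue a flush_cmd instead, and the per-filesystem "f2fs_flush" kthread
 * below issues a single WRITE_FLUSH bio on behalf of everything queued, then
 * completes all of the waiting commands at once.
 */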
199 static int issue_flush_thread(void *data)
201 struct f2fs_sb_info *sbi = data;
202 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
203 wait_queue_head_t *q = &fcc->flush_wait_queue;
205 if (kthread_should_stop())
208 spin_lock(&fcc->issue_lock);
209 if (fcc->issue_list) {
210 fcc->dispatch_list = fcc->issue_list;
211 fcc->issue_list = fcc->issue_tail = NULL;
213 spin_unlock(&fcc->issue_lock);
215 if (fcc->dispatch_list) {
216 struct bio *bio = bio_alloc(GFP_NOIO, 0);
217 struct flush_cmd *cmd, *next;
220 bio->bi_bdev = sbi->sb->s_bdev;
221 ret = submit_bio_wait(WRITE_FLUSH, bio);
223 for (cmd = fcc->dispatch_list; cmd; cmd = next) {
226 complete(&cmd->wait);
229 fcc->dispatch_list = NULL;
232 wait_event_interruptible(*q,
233 kthread_should_stop() || fcc->issue_list);
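/*
 * f2fs_issue_flush: with the NOBARRIER option the flush is skipped entirely;
 * without FLUSH_MERGE it falls back to a plain blkdev_issue_flush; otherwise
 * the request is linked onto fcc->issue_list and the caller sleeps until the
 * flush thread above completes it.
 */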
237 int f2fs_issue_flush(struct f2fs_sb_info *sbi)
239 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
240 struct flush_cmd cmd;
242 trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
243 test_opt(sbi, FLUSH_MERGE));
245 if (test_opt(sbi, NOBARRIER))
248 if (!test_opt(sbi, FLUSH_MERGE))
249 return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);
251 init_completion(&cmd.wait);
254 spin_lock(&fcc->issue_lock);
256 fcc->issue_tail->next = &cmd;
258 fcc->issue_list = &cmd;
259 fcc->issue_tail = &cmd;
260 spin_unlock(&fcc->issue_lock);
262 if (!fcc->dispatch_list)
263 wake_up(&fcc->flush_wait_queue);
265 wait_for_completion(&cmd.wait);
270 int create_flush_cmd_control(struct f2fs_sb_info *sbi)
272 dev_t dev = sbi->sb->s_bdev->bd_dev;
273 struct flush_cmd_control *fcc;
276 fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
279 spin_lock_init(&fcc->issue_lock);
280 init_waitqueue_head(&fcc->flush_wait_queue);
281 SM_I(sbi)->cmd_control_info = fcc;
282 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
283 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
284 if (IS_ERR(fcc->f2fs_issue_flush)) {
285 err = PTR_ERR(fcc->f2fs_issue_flush);
287 SM_I(sbi)->cmd_control_info = NULL;
294 void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
296 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
298 if (fcc && fcc->f2fs_issue_flush)
299 kthread_stop(fcc->f2fs_issue_flush);
301 SM_I(sbi)->cmd_control_info = NULL;
304 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
305 enum dirty_type dirty_type)
307 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
309 /* need not be added */
310 if (IS_CURSEG(sbi, segno))
313 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
314 dirty_i->nr_dirty[dirty_type]++;
316 if (dirty_type == DIRTY) {
317 struct seg_entry *sentry = get_seg_entry(sbi, segno);
318 enum dirty_type t = sentry->type;
320 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
321 dirty_i->nr_dirty[t]++;
325 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
326 enum dirty_type dirty_type)
328 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
330 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
331 dirty_i->nr_dirty[dirty_type]--;
333 if (dirty_type == DIRTY) {
334 struct seg_entry *sentry = get_seg_entry(sbi, segno);
335 enum dirty_type t = sentry->type;
337 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
338 dirty_i->nr_dirty[t]--;
340 if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
341 clear_bit(GET_SECNO(sbi, segno),
342 dirty_i->victim_secmap);
/*
 * This function must not fail, e.g. with -ENOMEM:
 * adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it is not added.
 */
351 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
353 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
354 unsigned short valid_blocks;
356 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
359 mutex_lock(&dirty_i->seglist_lock);
361 valid_blocks = get_valid_blocks(sbi, segno, 0);
363 if (valid_blocks == 0) {
364 __locate_dirty_segment(sbi, segno, PRE);
365 __remove_dirty_segment(sbi, segno, DIRTY);
366 } else if (valid_blocks < sbi->blocks_per_seg) {
367 __locate_dirty_segment(sbi, segno, DIRTY);
369 /* Recovery routine with SSR needs this */
370 __remove_dirty_segment(sbi, segno, DIRTY);
373 mutex_unlock(&dirty_i->seglist_lock);
376 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
377 block_t blkstart, block_t blklen)
379 sector_t start = SECTOR_FROM_BLOCK(sbi, blkstart);
380 sector_t len = SECTOR_FROM_BLOCK(sbi, blklen);
381 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
382 return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
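/*
 * If discarding the block that will hold the next dnode fails, write a
 * zero-filled page there instead, so that stale data is not mistaken for a
 * valid node block during roll-forward recovery.
 */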
385 void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
387 if (f2fs_issue_discard(sbi, blkaddr, 1)) {
388 struct page *page = grab_meta_page(sbi, blkaddr);
389 /* zero-filled page */
390 set_page_dirty(page);
391 f2fs_put_page(page, 1);
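/*
 * Collect small discard candidates for one segment: a block is a candidate
 * when it was valid at the last checkpoint (ckpt_valid_map) but is no longer
 * valid now (cur_valid_map), i.e. dmap = (cur ^ ckpt) & ckpt. Each contiguous
 * run of such blocks becomes one discard_entry on SM_I(sbi)->discard_list.
 */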
395 static void add_discard_addrs(struct f2fs_sb_info *sbi,
396 unsigned int segno, struct seg_entry *se)
398 struct list_head *head = &SM_I(sbi)->discard_list;
399 struct discard_entry *new;
400 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
401 int max_blocks = sbi->blocks_per_seg;
402 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
403 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
404 unsigned long dmap[entries];
405 unsigned int start = 0, end = -1;
408 if (!test_opt(sbi, DISCARD))
411 /* zero block will be discarded through the prefree list */
412 if (!se->valid_blocks || se->valid_blocks == max_blocks)
	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
416 for (i = 0; i < entries; i++)
417 dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
419 while (SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
420 start = __find_rev_next_bit(dmap, max_blocks, end + 1);
421 if (start >= max_blocks)
424 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
426 new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
427 INIT_LIST_HEAD(&new->list);
428 new->blkaddr = START_BLOCK(sbi, segno) + start;
429 new->len = end - start;
431 list_add_tail(&new->list, head);
432 SM_I(sbi)->nr_discards += end - start;
/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
439 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
441 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
443 unsigned int total_segs = TOTAL_SEGS(sbi);
445 mutex_lock(&dirty_i->seglist_lock);
446 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], total_segs)
447 __set_test_and_free(sbi, segno);
448 mutex_unlock(&dirty_i->seglist_lock);
451 void clear_prefree_segments(struct f2fs_sb_info *sbi)
453 struct list_head *head = &(SM_I(sbi)->discard_list);
454 struct discard_entry *entry, *this;
455 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
456 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
457 unsigned int total_segs = TOTAL_SEGS(sbi);
458 unsigned int start = 0, end = -1;
460 mutex_lock(&dirty_i->seglist_lock);
464 start = find_next_bit(prefree_map, total_segs, end + 1);
465 if (start >= total_segs)
467 end = find_next_zero_bit(prefree_map, total_segs, start + 1);
469 for (i = start; i < end; i++)
470 clear_bit(i, prefree_map);
472 dirty_i->nr_dirty[PRE] -= end - start;
474 if (!test_opt(sbi, DISCARD))
477 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
478 (end - start) << sbi->log_blocks_per_seg);
480 mutex_unlock(&dirty_i->seglist_lock);
482 /* send small discards */
483 list_for_each_entry_safe(entry, this, head, list) {
484 f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
485 list_del(&entry->list);
486 SM_I(sbi)->nr_discards -= entry->len;
487 kmem_cache_free(discard_entry_slab, entry);
491 static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
493 struct sit_info *sit_i = SIT_I(sbi);
494 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
495 sit_i->dirty_sentries++;
498 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
499 unsigned int segno, int modified)
501 struct seg_entry *se = get_seg_entry(sbi, segno);
504 __mark_sit_entry_dirty(sbi, segno);
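/*
 * update_sit_entry: adjust the valid block count and the validity bitmap of
 * the segment containing @blkaddr; @del is +1 when a block becomes valid and
 * -1 when it is invalidated. The SIT entry is marked dirty so the change is
 * written back at the next checkpoint.
 */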
507 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
509 struct seg_entry *se;
510 unsigned int segno, offset;
511 long int new_vblocks;
513 segno = GET_SEGNO(sbi, blkaddr);
515 se = get_seg_entry(sbi, segno);
516 new_vblocks = se->valid_blocks + del;
517 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
519 f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
520 (new_vblocks > sbi->blocks_per_seg)));
522 se->valid_blocks = new_vblocks;
523 se->mtime = get_mtime(sbi);
524 SIT_I(sbi)->max_mtime = se->mtime;
526 /* Update valid block bitmap */
528 if (f2fs_set_bit(offset, se->cur_valid_map))
531 if (!f2fs_clear_bit(offset, se->cur_valid_map))
534 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
535 se->ckpt_valid_blocks += del;
537 __mark_sit_entry_dirty(sbi, segno);
539 /* update total number of valid blocks to be written in ckpt area */
540 SIT_I(sbi)->written_valid_blocks += del;
542 if (sbi->segs_per_sec > 1)
543 get_sec_entry(sbi, segno)->valid_blocks += del;
546 void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
548 update_sit_entry(sbi, new, 1);
549 if (GET_SEGNO(sbi, old) != NULL_SEGNO)
550 update_sit_entry(sbi, old, -1);
552 locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
553 locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
556 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
558 unsigned int segno = GET_SEGNO(sbi, addr);
559 struct sit_info *sit_i = SIT_I(sbi);
561 f2fs_bug_on(addr == NULL_ADDR);
562 if (addr == NEW_ADDR)
565 /* add it into sit main buffer */
566 mutex_lock(&sit_i->sentry_lock);
568 update_sit_entry(sbi, addr, -1);
570 /* add it into dirty seglist */
571 locate_dirty_segment(sbi, segno);
573 mutex_unlock(&sit_i->sentry_lock);
/*
 * This function must be called with curseg->curseg_mutex held.
 */
579 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
580 struct f2fs_summary *sum)
582 struct curseg_info *curseg = CURSEG_I(sbi, type);
583 void *addr = curseg->sum_blk;
584 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
585 memcpy(addr, sum, sizeof(struct f2fs_summary));
/*
 * Calculate the number of current summary pages for writing.
 */
591 int npages_for_summary_flush(struct f2fs_sb_info *sbi)
593 int valid_sum_count = 0;
596 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
597 if (sbi->ckpt->alloc_type[i] == SSR)
598 valid_sum_count += sbi->blocks_per_seg;
600 valid_sum_count += curseg_blkoff(sbi, i);
603 sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
604 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
605 if (valid_sum_count <= sum_in_page)
607 else if ((valid_sum_count - sum_in_page) <=
608 (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
/*
 * Caller should put this summary page.
 */
616 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
618 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
621 static void write_sum_page(struct f2fs_sb_info *sbi,
622 struct f2fs_summary_block *sum_blk, block_t blk_addr)
624 struct page *page = grab_meta_page(sbi, blk_addr);
625 void *kaddr = page_address(page);
626 memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
627 set_page_dirty(page);
628 f2fs_put_page(page, 1);
631 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
633 struct curseg_info *curseg = CURSEG_I(sbi, type);
634 unsigned int segno = curseg->segno + 1;
635 struct free_segmap_info *free_i = FREE_I(sbi);
637 if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
638 return !test_bit(segno, free_i->free_segmap);
/*
 * Find a new segment in the free segment bitmap, in the requested allocation
 * direction. This function must succeed; otherwise it is a bug.
 */
646 static void get_new_segment(struct f2fs_sb_info *sbi,
647 unsigned int *newseg, bool new_sec, int dir)
649 struct free_segmap_info *free_i = FREE_I(sbi);
650 unsigned int segno, secno, zoneno;
651 unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
652 unsigned int hint = *newseg / sbi->segs_per_sec;
653 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
654 unsigned int left_start = hint;
659 write_lock(&free_i->segmap_lock);
661 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
662 segno = find_next_zero_bit(free_i->free_segmap,
663 TOTAL_SEGS(sbi), *newseg + 1);
664 if (segno - *newseg < sbi->segs_per_sec -
665 (*newseg % sbi->segs_per_sec))
669 secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
670 if (secno >= TOTAL_SECS(sbi)) {
671 if (dir == ALLOC_RIGHT) {
672 secno = find_next_zero_bit(free_i->free_secmap,
674 f2fs_bug_on(secno >= TOTAL_SECS(sbi));
677 left_start = hint - 1;
683 while (test_bit(left_start, free_i->free_secmap)) {
684 if (left_start > 0) {
688 left_start = find_next_zero_bit(free_i->free_secmap,
690 f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
696 segno = secno * sbi->segs_per_sec;
697 zoneno = secno / sbi->secs_per_zone;
699 /* give up on finding another zone */
702 if (sbi->secs_per_zone == 1)
704 if (zoneno == old_zoneno)
706 if (dir == ALLOC_LEFT) {
707 if (!go_left && zoneno + 1 >= total_zones)
709 if (go_left && zoneno == 0)
712 for (i = 0; i < NR_CURSEG_TYPE; i++)
713 if (CURSEG_I(sbi, i)->zone == zoneno)
716 if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another one */
719 hint = zoneno * sbi->secs_per_zone - 1;
720 else if (zoneno + 1 >= total_zones)
723 hint = (zoneno + 1) * sbi->secs_per_zone;
725 goto find_other_zone;
728 /* set it as dirty segment in free segmap */
729 f2fs_bug_on(test_bit(segno, free_i->free_segmap));
730 __set_inuse(sbi, segno);
732 write_unlock(&free_i->segmap_lock);
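/*
 * reset_curseg: switch the current segment of @type over to next_segno and
 * reinitialize its in-memory summary block footer as a data or node summary.
 */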
735 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
737 struct curseg_info *curseg = CURSEG_I(sbi, type);
738 struct summary_footer *sum_footer;
740 curseg->segno = curseg->next_segno;
741 curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
742 curseg->next_blkoff = 0;
743 curseg->next_segno = NULL_SEGNO;
745 sum_footer = &(curseg->sum_blk->footer);
746 memset(sum_footer, 0, sizeof(struct summary_footer));
747 if (IS_DATASEG(type))
748 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
749 if (IS_NODESEG(type))
750 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
751 __set_sit_entry_type(sbi, type, curseg->segno, modified);
/*
 * Allocate a current working segment.
 * This function always allocates a free segment in the LFS manner.
 */
758 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
760 struct curseg_info *curseg = CURSEG_I(sbi, type);
761 unsigned int segno = curseg->segno;
762 int dir = ALLOC_LEFT;
764 write_sum_page(sbi, curseg->sum_blk,
765 GET_SUM_BLOCK(sbi, segno));
766 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
769 if (test_opt(sbi, NOHEAP))
772 get_new_segment(sbi, &segno, new_sec, dir);
773 curseg->next_segno = segno;
774 reset_curseg(sbi, type, 1);
775 curseg->alloc_type = LFS;
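/*
 * __next_free_blkoff: for SSR allocation, find the first block at or after
 * @start that is free in both the current and the checkpointed validity
 * bitmaps (target_map = ckpt_map | cur_map), so that blocks still referenced
 * by the last checkpoint are never overwritten before the next one.
 */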
778 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
779 struct curseg_info *seg, block_t start)
781 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
782 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
783 unsigned long target_map[entries];
784 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
785 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
788 for (i = 0; i < entries; i++)
789 target_map[i] = ckpt_map[i] | cur_map[i];
791 pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
793 seg->next_blkoff = pos;
/*
 * If a segment is written in the LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. If a segment is written in
 * the SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
801 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
802 struct curseg_info *seg)
804 if (seg->alloc_type == SSR)
805 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
/*
 * This function always allocates a used segment (from the dirty seglist) in
 * the SSR manner, so it needs to restore the existing summary information of
 * the segment's valid blocks.
 */
814 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
816 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
817 struct curseg_info *curseg = CURSEG_I(sbi, type);
818 unsigned int new_segno = curseg->next_segno;
819 struct f2fs_summary_block *sum_node;
820 struct page *sum_page;
822 write_sum_page(sbi, curseg->sum_blk,
823 GET_SUM_BLOCK(sbi, curseg->segno));
824 __set_test_and_inuse(sbi, new_segno);
826 mutex_lock(&dirty_i->seglist_lock);
827 __remove_dirty_segment(sbi, new_segno, PRE);
828 __remove_dirty_segment(sbi, new_segno, DIRTY);
829 mutex_unlock(&dirty_i->seglist_lock);
831 reset_curseg(sbi, type, 1);
832 curseg->alloc_type = SSR;
833 __next_free_blkoff(sbi, curseg, 0);
836 sum_page = get_sum_page(sbi, new_segno);
837 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
838 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
839 f2fs_put_page(sum_page, 1);
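/*
 * get_ssr_segment: pick a dirty segment to reuse for SSR writes through the
 * victim-selection ops. When free sections are scarce, a data log tries not
 * only its own temperature but also the hotter data log types.
 */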
843 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
845 struct curseg_info *curseg = CURSEG_I(sbi, type);
846 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
848 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
849 return v_ops->get_victim(sbi,
850 &(curseg)->next_segno, BG_GC, type, SSR);
852 /* For data segments, let's do SSR more intensively */
853 for (; type >= CURSEG_HOT_DATA; type--)
854 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
/*
 * Flush out the current segment and replace it with a new one.
 * This function must succeed; otherwise it is a bug.
 */
864 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
865 int type, bool force)
867 struct curseg_info *curseg = CURSEG_I(sbi, type);
870 new_curseg(sbi, type, true);
871 else if (type == CURSEG_WARM_NODE)
872 new_curseg(sbi, type, false);
873 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
874 new_curseg(sbi, type, false);
875 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
876 change_curseg(sbi, type, true);
878 new_curseg(sbi, type, false);
880 stat_inc_seg_type(sbi, curseg);
883 void allocate_new_segments(struct f2fs_sb_info *sbi)
885 struct curseg_info *curseg;
886 unsigned int old_curseg;
889 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
890 curseg = CURSEG_I(sbi, i);
891 old_curseg = curseg->segno;
892 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
893 locate_dirty_segment(sbi, old_curseg);
897 static const struct segment_allocation default_salloc_ops = {
898 .allocate_segment = allocate_segment_by_default,
901 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
903 struct curseg_info *curseg = CURSEG_I(sbi, type);
904 if (curseg->next_blkoff < sbi->blocks_per_seg)
909 static int __get_segment_type_2(struct page *page, enum page_type p_type)
912 return CURSEG_HOT_DATA;
914 return CURSEG_HOT_NODE;
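/*
 * Log separation policy: depending on the active_logs option (2, 4 or 6),
 * the __get_segment_type_* helpers route data and node pages to a current
 * segment by their expected update temperature: directory data and direct
 * node blocks tend to be hot, regular file data warm, and indirect node
 * blocks cold.
 */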
917 static int __get_segment_type_4(struct page *page, enum page_type p_type)
919 if (p_type == DATA) {
920 struct inode *inode = page->mapping->host;
922 if (S_ISDIR(inode->i_mode))
923 return CURSEG_HOT_DATA;
925 return CURSEG_COLD_DATA;
927 if (IS_DNODE(page) && !is_cold_node(page))
928 return CURSEG_HOT_NODE;
930 return CURSEG_COLD_NODE;
934 static int __get_segment_type_6(struct page *page, enum page_type p_type)
936 if (p_type == DATA) {
937 struct inode *inode = page->mapping->host;
939 if (S_ISDIR(inode->i_mode))
940 return CURSEG_HOT_DATA;
941 else if (is_cold_data(page) || file_is_cold(inode))
942 return CURSEG_COLD_DATA;
944 return CURSEG_WARM_DATA;
947 return is_cold_node(page) ? CURSEG_WARM_NODE :
950 return CURSEG_COLD_NODE;
954 static int __get_segment_type(struct page *page, enum page_type p_type)
956 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
957 switch (sbi->active_logs) {
959 return __get_segment_type_2(page, p_type);
961 return __get_segment_type_4(page, p_type);
963 /* NR_CURSEG_TYPE(6) logs by default */
964 f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
965 return __get_segment_type_6(page, p_type);
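/*
 * allocate_data_block: reserve the next free block of the chosen log for
 * @page, record its summary entry, advance the log's next_blkoff (switching
 * to a new segment if the log is full) and update the SIT entries of the old
 * and new block addresses.
 */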
968 void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
969 block_t old_blkaddr, block_t *new_blkaddr,
970 struct f2fs_summary *sum, int type)
972 struct sit_info *sit_i = SIT_I(sbi);
973 struct curseg_info *curseg;
975 curseg = CURSEG_I(sbi, type);
977 mutex_lock(&curseg->curseg_mutex);
979 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	/*
	 * __add_sum_entry must be called with curseg_mutex held, because it
	 * updates a summary entry in the current summary block.
	 */
986 __add_sum_entry(sbi, type, sum);
988 mutex_lock(&sit_i->sentry_lock);
989 __refresh_next_blkoff(sbi, curseg);
991 stat_inc_block_count(sbi, curseg);
993 if (!__has_curseg_space(sbi, type))
994 sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs the latest valid block information.
	 */
999 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
1001 mutex_unlock(&sit_i->sentry_lock);
1003 if (page && IS_NODESEG(type))
1004 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
1006 mutex_unlock(&curseg->curseg_mutex);
1009 static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
1010 block_t old_blkaddr, block_t *new_blkaddr,
1011 struct f2fs_summary *sum, struct f2fs_io_info *fio)
1013 int type = __get_segment_type(page, fio->type);
1015 allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type);
1017 /* writeout dirty page into bdev */
1018 f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio);
1021 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
1023 struct f2fs_io_info fio = {
1025 .rw = WRITE_SYNC | REQ_META | REQ_PRIO
1028 set_page_writeback(page);
1029 f2fs_submit_page_mbio(sbi, page, page->index, &fio);
1032 void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
1033 struct f2fs_io_info *fio,
1034 unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
1036 struct f2fs_summary sum;
1037 set_summary(&sum, nid, 0, 0);
1038 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio);
1041 void write_data_page(struct page *page, struct dnode_of_data *dn,
1042 block_t *new_blkaddr, struct f2fs_io_info *fio)
1044 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
1045 struct f2fs_summary sum;
1046 struct node_info ni;
1048 f2fs_bug_on(dn->data_blkaddr == NULL_ADDR);
1049 get_node_info(sbi, dn->nid, &ni);
1050 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1052 do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio);
1055 void rewrite_data_page(struct page *page, block_t old_blkaddr,
1056 struct f2fs_io_info *fio)
1058 struct inode *inode = page->mapping->host;
1059 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
1060 f2fs_submit_page_mbio(sbi, page, old_blkaddr, fio);
1063 void recover_data_page(struct f2fs_sb_info *sbi,
1064 struct page *page, struct f2fs_summary *sum,
1065 block_t old_blkaddr, block_t new_blkaddr)
1067 struct sit_info *sit_i = SIT_I(sbi);
1068 struct curseg_info *curseg;
1069 unsigned int segno, old_cursegno;
1070 struct seg_entry *se;
1073 segno = GET_SEGNO(sbi, new_blkaddr);
1074 se = get_seg_entry(sbi, segno);
1077 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
1078 if (old_blkaddr == NULL_ADDR)
1079 type = CURSEG_COLD_DATA;
1081 type = CURSEG_WARM_DATA;
1083 curseg = CURSEG_I(sbi, type);
1085 mutex_lock(&curseg->curseg_mutex);
1086 mutex_lock(&sit_i->sentry_lock);
1088 old_cursegno = curseg->segno;
1090 /* change the current segment */
1091 if (segno != curseg->segno) {
1092 curseg->next_segno = segno;
1093 change_curseg(sbi, type, true);
1096 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
1097 __add_sum_entry(sbi, type, sum);
1099 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
1100 locate_dirty_segment(sbi, old_cursegno);
1102 mutex_unlock(&sit_i->sentry_lock);
1103 mutex_unlock(&curseg->curseg_mutex);
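/*
 * is_merged_page: check whether @page still sits in the in-flight merged
 * write bio of its page type; if so, that bio has to be submitted before
 * waiting for the page's writeback to finish.
 */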
1106 static inline bool is_merged_page(struct f2fs_sb_info *sbi,
1107 struct page *page, enum page_type type)
1109 enum page_type btype = PAGE_TYPE_OF_BIO(type);
1110 struct f2fs_bio_info *io = &sbi->write_io[btype];
1111 struct bio_vec *bvec;
1114 down_read(&io->io_rwsem);
1118 bio_for_each_segment_all(bvec, io->bio, i) {
1119 if (page == bvec->bv_page) {
1120 up_read(&io->io_rwsem);
1126 up_read(&io->io_rwsem);
1130 void f2fs_wait_on_page_writeback(struct page *page,
1131 enum page_type type)
1133 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
1134 if (PageWriteback(page)) {
1135 if (is_merged_page(sbi, page, type))
1136 f2fs_submit_merged_bio(sbi, type, WRITE);
1137 wait_on_page_writeback(page);
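/*
 * Compacted summaries: at checkpoint time the NAT/SIT journals and the data
 * summaries of the three data logs can be packed into a few meta pages
 * (CP_COMPACT_SUM_FLAG) instead of one full summary block per log; the
 * readers and writers below walk that packed layout.
 */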
1141 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1143 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1144 struct curseg_info *seg_i;
1145 unsigned char *kaddr;
1150 start = start_sum_block(sbi);
1152 page = get_meta_page(sbi, start++);
1153 kaddr = (unsigned char *)page_address(page);
1155 /* Step 1: restore nat cache */
1156 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1157 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1159 /* Step 2: restore sit cache */
1160 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1161 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1163 offset = 2 * SUM_JOURNAL_SIZE;
1165 /* Step 3: restore summary entries */
1166 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1167 unsigned short blk_off;
1170 seg_i = CURSEG_I(sbi, i);
1171 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1172 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1173 seg_i->next_segno = segno;
1174 reset_curseg(sbi, i, 0);
1175 seg_i->alloc_type = ckpt->alloc_type[i];
1176 seg_i->next_blkoff = blk_off;
1178 if (seg_i->alloc_type == SSR)
1179 blk_off = sbi->blocks_per_seg;
1181 for (j = 0; j < blk_off; j++) {
1182 struct f2fs_summary *s;
1183 s = (struct f2fs_summary *)(kaddr + offset);
1184 seg_i->sum_blk->entries[j] = *s;
1185 offset += SUMMARY_SIZE;
1186 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1190 f2fs_put_page(page, 1);
1193 page = get_meta_page(sbi, start++);
1194 kaddr = (unsigned char *)page_address(page);
1198 f2fs_put_page(page, 1);
1202 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1204 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1205 struct f2fs_summary_block *sum;
1206 struct curseg_info *curseg;
1208 unsigned short blk_off;
1209 unsigned int segno = 0;
1210 block_t blk_addr = 0;
1212 /* get segment number and block addr */
1213 if (IS_DATASEG(type)) {
1214 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1215 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1217 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1218 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1220 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1222 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1224 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1226 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1227 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1228 type - CURSEG_HOT_NODE);
1230 blk_addr = GET_SUM_BLOCK(sbi, segno);
1233 new = get_meta_page(sbi, blk_addr);
1234 sum = (struct f2fs_summary_block *)page_address(new);
1236 if (IS_NODESEG(type)) {
1237 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
1238 struct f2fs_summary *ns = &sum->entries[0];
1240 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1242 ns->ofs_in_node = 0;
1247 err = restore_node_summary(sbi, segno, sum);
1249 f2fs_put_page(new, 1);
	/* restore the uncompleted segment into curseg */
1256 curseg = CURSEG_I(sbi, type);
1257 mutex_lock(&curseg->curseg_mutex);
1258 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1259 curseg->next_segno = segno;
1260 reset_curseg(sbi, type, 0);
1261 curseg->alloc_type = ckpt->alloc_type[type];
1262 curseg->next_blkoff = blk_off;
1263 mutex_unlock(&curseg->curseg_mutex);
1264 f2fs_put_page(new, 1);
1268 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1270 int type = CURSEG_HOT_DATA;
1273 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1274 /* restore for compacted data summary */
1275 if (read_compacted_summaries(sbi))
1277 type = CURSEG_HOT_NODE;
1280 for (; type <= CURSEG_COLD_NODE; type++) {
1281 err = read_normal_summaries(sbi, type);
1289 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1292 unsigned char *kaddr;
1293 struct f2fs_summary *summary;
1294 struct curseg_info *seg_i;
1295 int written_size = 0;
1298 page = grab_meta_page(sbi, blkaddr++);
1299 kaddr = (unsigned char *)page_address(page);
1301 /* Step 1: write nat cache */
1302 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1303 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1304 written_size += SUM_JOURNAL_SIZE;
1306 /* Step 2: write sit cache */
1307 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1308 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1310 written_size += SUM_JOURNAL_SIZE;
1312 /* Step 3: write summary entries */
1313 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1314 unsigned short blkoff;
1315 seg_i = CURSEG_I(sbi, i);
1316 if (sbi->ckpt->alloc_type[i] == SSR)
1317 blkoff = sbi->blocks_per_seg;
1319 blkoff = curseg_blkoff(sbi, i);
1321 for (j = 0; j < blkoff; j++) {
1323 page = grab_meta_page(sbi, blkaddr++);
1324 kaddr = (unsigned char *)page_address(page);
1327 summary = (struct f2fs_summary *)(kaddr + written_size);
1328 *summary = seg_i->sum_blk->entries[j];
1329 written_size += SUMMARY_SIZE;
1331 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1335 set_page_dirty(page);
1336 f2fs_put_page(page, 1);
1341 set_page_dirty(page);
1342 f2fs_put_page(page, 1);
1346 static void write_normal_summaries(struct f2fs_sb_info *sbi,
1347 block_t blkaddr, int type)
1350 if (IS_DATASEG(type))
1351 end = type + NR_CURSEG_DATA_TYPE;
1353 end = type + NR_CURSEG_NODE_TYPE;
1355 for (i = type; i < end; i++) {
1356 struct curseg_info *sum = CURSEG_I(sbi, i);
1357 mutex_lock(&sum->curseg_mutex);
1358 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1359 mutex_unlock(&sum->curseg_mutex);
1363 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1365 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
1366 write_compacted_summaries(sbi, start_blk);
1368 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1371 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1373 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
1374 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
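/*
 * lookup_journal_in_cursum: find the journal slot in the current summary
 * block that caches NAT/SIT entry @val; if it is not present and @alloc is
 * set, append a new slot and return its index. Returns -1 when no slot is
 * available.
 */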
1377 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1378 unsigned int val, int alloc)
1382 if (type == NAT_JOURNAL) {
1383 for (i = 0; i < nats_in_cursum(sum); i++) {
1384 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1387 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1388 return update_nats_in_cursum(sum, 1);
1389 } else if (type == SIT_JOURNAL) {
1390 for (i = 0; i < sits_in_cursum(sum); i++)
1391 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1393 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1394 return update_sits_in_cursum(sum, 1);
1399 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1402 struct sit_info *sit_i = SIT_I(sbi);
1403 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1404 block_t blk_addr = sit_i->sit_base_addr + offset;
1406 check_seg_range(sbi, segno);
1408 /* calculate sit block address */
1409 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1410 blk_addr += sit_i->sit_blocks;
1412 return get_meta_page(sbi, blk_addr);
1415 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1418 struct sit_info *sit_i = SIT_I(sbi);
1419 struct page *src_page, *dst_page;
1420 pgoff_t src_off, dst_off;
1421 void *src_addr, *dst_addr;
1423 src_off = current_sit_addr(sbi, start);
1424 dst_off = next_sit_addr(sbi, src_off);
1426 /* get current sit block page without lock */
1427 src_page = get_meta_page(sbi, src_off);
1428 dst_page = grab_meta_page(sbi, dst_off);
1429 f2fs_bug_on(PageDirty(src_page));
1431 src_addr = page_address(src_page);
1432 dst_addr = page_address(dst_page);
1433 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1435 set_page_dirty(dst_page);
1436 f2fs_put_page(src_page, 1);
1438 set_to_next_sit(sit_i, start);
1443 static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
1445 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1446 struct f2fs_summary_block *sum = curseg->sum_blk;
	/*
	 * If the journal area in the current summary block is full of SIT
	 * entries, flush them all out to the SIT area; otherwise, newly
	 * dirtied (hot) SIT entries could not replace the cached ones.
	 */
1454 if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
1455 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1457 segno = le32_to_cpu(segno_in_journal(sum, i));
1458 __mark_sit_entry_dirty(sbi, segno);
1460 update_sits_in_cursum(sum, -sits_in_cursum(sum));
/*
 * The checkpoint (CP) calls this function, which flushes SIT entries,
 * including those cached in the SIT journal, and moves prefree segments
 * to free segments.
 */
1470 void flush_sit_entries(struct f2fs_sb_info *sbi)
1472 struct sit_info *sit_i = SIT_I(sbi);
1473 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1474 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1475 struct f2fs_summary_block *sum = curseg->sum_blk;
1476 unsigned long nsegs = TOTAL_SEGS(sbi);
1477 struct page *page = NULL;
1478 struct f2fs_sit_block *raw_sit = NULL;
1479 unsigned int start = 0, end = 0;
1483 mutex_lock(&curseg->curseg_mutex);
1484 mutex_lock(&sit_i->sentry_lock);
1487 * "flushed" indicates whether sit entries in journal are flushed
1488 * to the SIT area or not.
1490 flushed = flush_sits_in_journal(sbi);
1492 for_each_set_bit(segno, bitmap, nsegs) {
1493 struct seg_entry *se = get_seg_entry(sbi, segno);
1494 int sit_offset, offset;
1496 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1498 /* add discard candidates */
1499 if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards)
1500 add_discard_addrs(sbi, segno, se);
1505 offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
1507 segno_in_journal(sum, offset) = cpu_to_le32(segno);
1508 seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
1512 if (!page || (start > segno) || (segno > end)) {
1514 f2fs_put_page(page, 1);
1518 start = START_SEGNO(sit_i, segno);
1519 end = start + SIT_ENTRY_PER_BLOCK - 1;
1521 /* read sit block that will be updated */
1522 page = get_next_sit_page(sbi, start);
1523 raw_sit = page_address(page);
		/* update the entry in the SIT block */
1527 seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
1529 __clear_bit(segno, bitmap);
1530 sit_i->dirty_sentries--;
1532 mutex_unlock(&sit_i->sentry_lock);
1533 mutex_unlock(&curseg->curseg_mutex);
1535 /* writeout last modified SIT block */
1536 f2fs_put_page(page, 1);
1538 set_prefree_as_free_segments(sbi);
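/*
 * The build_* helpers below construct the in-memory segment manager state at
 * mount time: the per-segment SIT entries, the free segment/section bitmaps,
 * the current segments (restored from the on-disk summaries) and the dirty
 * segment lists.
 */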
1541 static int build_sit_info(struct f2fs_sb_info *sbi)
1543 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1544 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1545 struct sit_info *sit_i;
1546 unsigned int sit_segs, start;
1547 char *src_bitmap, *dst_bitmap;
1548 unsigned int bitmap_size;
1550 /* allocate memory for SIT information */
1551 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1555 SM_I(sbi)->sit_info = sit_i;
1557 sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
1558 if (!sit_i->sentries)
1561 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1562 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1563 if (!sit_i->dirty_sentries_bitmap)
1566 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1567 sit_i->sentries[start].cur_valid_map
1568 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1569 sit_i->sentries[start].ckpt_valid_map
1570 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1571 if (!sit_i->sentries[start].cur_valid_map
1572 || !sit_i->sentries[start].ckpt_valid_map)
1576 if (sbi->segs_per_sec > 1) {
1577 sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
1578 sizeof(struct sec_entry));
1579 if (!sit_i->sec_entries)
1583 /* get information related with SIT */
1584 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
	/* set up the SIT bitmap from the checkpoint pack */
1587 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1588 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1590 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1594 /* init SIT information */
1595 sit_i->s_ops = &default_salloc_ops;
1597 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1598 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1599 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1600 sit_i->sit_bitmap = dst_bitmap;
1601 sit_i->bitmap_size = bitmap_size;
1602 sit_i->dirty_sentries = 0;
1603 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1604 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1605 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1606 mutex_init(&sit_i->sentry_lock);
1610 static int build_free_segmap(struct f2fs_sb_info *sbi)
1612 struct f2fs_sm_info *sm_info = SM_I(sbi);
1613 struct free_segmap_info *free_i;
1614 unsigned int bitmap_size, sec_bitmap_size;
1616 /* allocate memory for free segmap information */
1617 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1621 SM_I(sbi)->free_info = free_i;
1623 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1624 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1625 if (!free_i->free_segmap)
1628 sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1629 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1630 if (!free_i->free_secmap)
1633 /* set all segments as dirty temporarily */
1634 memset(free_i->free_segmap, 0xff, bitmap_size);
1635 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1637 /* init free segmap information */
1638 free_i->start_segno =
1639 (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
1640 free_i->free_segments = 0;
1641 free_i->free_sections = 0;
1642 rwlock_init(&free_i->segmap_lock);
1646 static int build_curseg(struct f2fs_sb_info *sbi)
1648 struct curseg_info *array;
1651 array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
1655 SM_I(sbi)->curseg_array = array;
1657 for (i = 0; i < NR_CURSEG_TYPE; i++) {
1658 mutex_init(&array[i].curseg_mutex);
1659 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1660 if (!array[i].sum_blk)
1662 array[i].segno = NULL_SEGNO;
1663 array[i].next_blkoff = 0;
1665 return restore_curseg_summaries(sbi);
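/*
 * build_sit_entries: read every on-disk SIT block (preferring the copy
 * cached in the SIT journal of the cold data summary) and initialize the
 * in-memory seg_entry array, including per-section valid block counts.
 */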
1668 static void build_sit_entries(struct f2fs_sb_info *sbi)
1670 struct sit_info *sit_i = SIT_I(sbi);
1671 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1672 struct f2fs_summary_block *sum = curseg->sum_blk;
1673 int sit_blk_cnt = SIT_BLK_CNT(sbi);
1674 unsigned int i, start, end;
1675 unsigned int readed, start_blk = 0;
1676 int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
1679 readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
1681 start = start_blk * sit_i->sents_per_block;
1682 end = (start_blk + readed) * sit_i->sents_per_block;
1684 for (; start < end && start < TOTAL_SEGS(sbi); start++) {
1685 struct seg_entry *se = &sit_i->sentries[start];
1686 struct f2fs_sit_block *sit_blk;
1687 struct f2fs_sit_entry sit;
1690 mutex_lock(&curseg->curseg_mutex);
1691 for (i = 0; i < sits_in_cursum(sum); i++) {
1692 if (le32_to_cpu(segno_in_journal(sum, i))
1694 sit = sit_in_journal(sum, i);
1695 mutex_unlock(&curseg->curseg_mutex);
1699 mutex_unlock(&curseg->curseg_mutex);
1701 page = get_current_sit_page(sbi, start);
1702 sit_blk = (struct f2fs_sit_block *)page_address(page);
1703 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1704 f2fs_put_page(page, 1);
1706 check_block_count(sbi, start, &sit);
1707 seg_info_from_raw_sit(se, &sit);
1708 if (sbi->segs_per_sec > 1) {
1709 struct sec_entry *e = get_sec_entry(sbi, start);
1710 e->valid_blocks += se->valid_blocks;
1713 start_blk += readed;
1714 } while (start_blk < sit_blk_cnt);
1717 static void init_free_segmap(struct f2fs_sb_info *sbi)
1722 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1723 struct seg_entry *sentry = get_seg_entry(sbi, start);
1724 if (!sentry->valid_blocks)
1725 __set_free(sbi, start);
	/* mark the current segments as in use */
1729 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
1730 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
1731 __set_test_and_inuse(sbi, curseg_t->segno);
1735 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
1737 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1738 struct free_segmap_info *free_i = FREE_I(sbi);
1739 unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
1740 unsigned short valid_blocks;
1743 /* find dirty segment based on free segmap */
1744 segno = find_next_inuse(free_i, total_segs, offset);
1745 if (segno >= total_segs)
1748 valid_blocks = get_valid_blocks(sbi, segno, 0);
1749 if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
1751 mutex_lock(&dirty_i->seglist_lock);
1752 __locate_dirty_segment(sbi, segno, DIRTY);
1753 mutex_unlock(&dirty_i->seglist_lock);
1757 static int init_victim_secmap(struct f2fs_sb_info *sbi)
1759 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1760 unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1762 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
1763 if (!dirty_i->victim_secmap)
1768 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
1770 struct dirty_seglist_info *dirty_i;
1771 unsigned int bitmap_size, i;
1773 /* allocate memory for dirty segments list information */
1774 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
1778 SM_I(sbi)->dirty_info = dirty_i;
1779 mutex_init(&dirty_i->seglist_lock);
1781 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1783 for (i = 0; i < NR_DIRTY_TYPE; i++) {
1784 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
1785 if (!dirty_i->dirty_segmap[i])
1789 init_dirty_segmap(sbi);
1790 return init_victim_secmap(sbi);
/*
 * Update min, max modified time for the cost-benefit GC algorithm.
 */
1796 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
1798 struct sit_info *sit_i = SIT_I(sbi);
1801 mutex_lock(&sit_i->sentry_lock);
1803 sit_i->min_mtime = LLONG_MAX;
1805 for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
1807 unsigned long long mtime = 0;
1809 for (i = 0; i < sbi->segs_per_sec; i++)
1810 mtime += get_seg_entry(sbi, segno + i)->mtime;
1812 mtime = div_u64(mtime, sbi->segs_per_sec);
1814 if (sit_i->min_mtime > mtime)
1815 sit_i->min_mtime = mtime;
1817 sit_i->max_mtime = get_mtime(sbi);
1818 mutex_unlock(&sit_i->sentry_lock);
1821 int build_segment_manager(struct f2fs_sb_info *sbi)
1823 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1824 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1825 struct f2fs_sm_info *sm_info;
1828 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
1833 sbi->sm_info = sm_info;
1834 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1835 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1836 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
1837 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1838 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1839 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
1840 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
1841 sm_info->rec_prefree_segments = sm_info->main_segments *
1842 DEF_RECLAIM_PREFREE_SEGMENTS / 100;
1843 sm_info->ipu_policy = F2FS_IPU_DISABLE;
1844 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
1846 INIT_LIST_HEAD(&sm_info->discard_list);
1847 sm_info->nr_discards = 0;
1848 sm_info->max_discards = 0;
1850 if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
1851 err = create_flush_cmd_control(sbi);
1856 err = build_sit_info(sbi);
1859 err = build_free_segmap(sbi);
1862 err = build_curseg(sbi);
1866 /* reinit free segmap based on SIT */
1867 build_sit_entries(sbi);
1869 init_free_segmap(sbi);
1870 err = build_dirty_segmap(sbi);
1874 init_min_max_mtime(sbi);
1878 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
1879 enum dirty_type dirty_type)
1881 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1883 mutex_lock(&dirty_i->seglist_lock);
1884 kfree(dirty_i->dirty_segmap[dirty_type]);
1885 dirty_i->nr_dirty[dirty_type] = 0;
1886 mutex_unlock(&dirty_i->seglist_lock);
1889 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
1891 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1892 kfree(dirty_i->victim_secmap);
1895 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
1897 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1903 /* discard pre-free/dirty segments list */
1904 for (i = 0; i < NR_DIRTY_TYPE; i++)
1905 discard_dirty_segmap(sbi, i);
1907 destroy_victim_secmap(sbi);
1908 SM_I(sbi)->dirty_info = NULL;
1912 static void destroy_curseg(struct f2fs_sb_info *sbi)
1914 struct curseg_info *array = SM_I(sbi)->curseg_array;
1919 SM_I(sbi)->curseg_array = NULL;
1920 for (i = 0; i < NR_CURSEG_TYPE; i++)
1921 kfree(array[i].sum_blk);
1925 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
1927 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
1930 SM_I(sbi)->free_info = NULL;
1931 kfree(free_i->free_segmap);
1932 kfree(free_i->free_secmap);
1936 static void destroy_sit_info(struct f2fs_sb_info *sbi)
1938 struct sit_info *sit_i = SIT_I(sbi);
1944 if (sit_i->sentries) {
1945 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1946 kfree(sit_i->sentries[start].cur_valid_map);
1947 kfree(sit_i->sentries[start].ckpt_valid_map);
1950 vfree(sit_i->sentries);
1951 vfree(sit_i->sec_entries);
1952 kfree(sit_i->dirty_sentries_bitmap);
1954 SM_I(sbi)->sit_info = NULL;
1955 kfree(sit_i->sit_bitmap);
1959 void destroy_segment_manager(struct f2fs_sb_info *sbi)
1961 struct f2fs_sm_info *sm_info = SM_I(sbi);
1965 destroy_flush_cmd_control(sbi);
1966 destroy_dirty_segmap(sbi);
1967 destroy_curseg(sbi);
1968 destroy_free_segmap(sbi);
1969 destroy_sit_info(sbi);
1970 sbi->sm_info = NULL;
1974 int __init create_segment_manager_caches(void)
1976 discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
1977 sizeof(struct discard_entry));
1978 if (!discard_entry_slab)
1983 void destroy_segment_manager_caches(void)
1985 kmem_cache_destroy(discard_entry_slab);