4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/f2fs_fs.h>
13 #include <linux/bio.h>
14 #include <linux/blkdev.h>
15 #include <linux/prefetch.h>
16 #include <linux/kthread.h>
17 #include <linux/vmalloc.h>
18 #include <linux/swap.h>
23 #include <trace/events/f2fs.h>
25 #define __reverse_ffz(x) __reverse_ffs(~(x))
27 static struct kmem_cache *discard_entry_slab;
28 static struct kmem_cache *sit_entry_set_slab;
31 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
32 * MSB and LSB are reversed in a byte by f2fs_set_bit.
34 static inline unsigned long __reverse_ffs(unsigned long word)
38 #if BITS_PER_LONG == 64
39 if ((word & 0xffffffff) == 0) {
44 if ((word & 0xffff) == 0) {
48 if ((word & 0xff) == 0) {
52 if ((word & 0xf0) == 0)
56 if ((word & 0xc) == 0)
60 if ((word & 0x2) == 0)
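/*
 * Illustrative sketch (compiled out): the reversed in-byte bit order that
 * __reverse_ffs() assumes. This is not quoted from f2fs_set_bit(); it only
 * assumes that, within each byte, logical bit 0 maps to the byte's MSB.
 */
#if 0
static void demo_reverse_bit_order(void)
{
	unsigned char byte = 0;

	byte |= 1 << (7 - 0);	/* f2fs-style "bit 0" -> mask 0x80 */
	byte |= 1 << (7 - 7);	/* f2fs-style "bit 7" -> mask 0x01 */

	/*
	 * A generic __ffs() on this byte reports the 0x01 bit first, while
	 * the f2fs view treats 0x80 as bit 0; __reverse_ffs(byte) answers
	 * the latter question and returns 0 here.
	 */
}
#endif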
66 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
67 * f2fs_set_bit makes MSB and LSB reversed in a byte.
70 *   f2fs_set_bit(0, bitmap) => 1000 0000 (sets the MSB of byte 0)
71 *   f2fs_set_bit(7, bitmap) => 0000 0001 (sets the LSB of byte 0)
73 static unsigned long __find_rev_next_bit(const unsigned long *addr,
74 unsigned long size, unsigned long offset)
76 const unsigned long *p = addr + BIT_WORD(offset);
77 unsigned long result = offset & ~(BITS_PER_LONG - 1);
79 unsigned long mask, submask;
80 unsigned long quot, rest;
86 offset %= BITS_PER_LONG;
91 quot = (offset >> 3) << 3;
94 submask = (unsigned char)(0xff << rest) >> rest;
98 if (size < BITS_PER_LONG)
103 size -= BITS_PER_LONG;
104 result += BITS_PER_LONG;
106 while (size & ~(BITS_PER_LONG-1)) {
110 result += BITS_PER_LONG;
111 size -= BITS_PER_LONG;
117 tmp &= (~0UL >> (BITS_PER_LONG - size));
118 if (tmp == 0UL) /* Are any bits set? */
119 return result + size; /* Nope. */
121 return result + __reverse_ffs(tmp);
124 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
125 unsigned long size, unsigned long offset)
127 const unsigned long *p = addr + BIT_WORD(offset);
128 unsigned long result = offset & ~(BITS_PER_LONG - 1);
130 unsigned long mask, submask;
131 unsigned long quot, rest;
137 offset %= BITS_PER_LONG;
142 quot = (offset >> 3) << 3;
144 mask = ~(~0UL << quot);
145 submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
149 if (size < BITS_PER_LONG)
154 size -= BITS_PER_LONG;
155 result += BITS_PER_LONG;
157 while (size & ~(BITS_PER_LONG - 1)) {
161 result += BITS_PER_LONG;
162 size -= BITS_PER_LONG;
170 if (tmp == ~0UL) /* Are any bits zero? */
171 return result + size; /* Nope. */
173 return result + __reverse_ffz(tmp);
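/*
 * Minimal usage sketch (compiled out) of the two helpers above: walking
 * contiguous runs of set bits in a reversed-order bitmap, the same pattern
 * add_discard_addrs() uses below to extract discard extents.
 */
#if 0
static void demo_walk_rev_bitmap(const unsigned long *map, unsigned int nbits)
{
	unsigned int start, end = -1;

	while (1) {
		start = __find_rev_next_bit(map, nbits, end + 1);
		if (start >= nbits)
			break;
		end = __find_rev_next_zero_bit(map, nbits, start + 1);
		/* [start, end) is one contiguous run of set bits */
	}
}
#endif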
177 * This function balances dirty node and dentry pages.
178 * In addition, it controls garbage collection.
180 void f2fs_balance_fs(struct f2fs_sb_info *sbi)
183 * We should do GC or end up with a checkpoint if there are too many dirty
184 * dir/node pages and not enough free segments.
186 if (has_not_enough_free_secs(sbi, 0)) {
187 mutex_lock(&sbi->gc_mutex);
192 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
194 /* check the # of cached NAT entries and prefree segments */
195 if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
196 excess_prefree_segs(sbi))
197 f2fs_sync_fs(sbi->sb, true);
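/*
 * Illustrative caller pattern (compiled out, hypothetical operation): paths
 * that are about to dirty dentry/node pages call f2fs_balance_fs() first so
 * foreground GC can reclaim sections, while writeback/background paths use
 * the cheaper f2fs_balance_fs_bg() above.
 */
#if 0
static int f2fs_demo_dirtying_op(struct f2fs_sb_info *sbi)
{
	f2fs_balance_fs(sbi);	/* may take gc_mutex and run foreground GC */

	/* ... dirty dentry and node pages here ... */
	return 0;
}
#endif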
200 static int issue_flush_thread(void *data)
202 struct f2fs_sb_info *sbi = data;
203 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
204 wait_queue_head_t *q = &fcc->flush_wait_queue;
206 if (kthread_should_stop())
209 if (!llist_empty(&fcc->issue_list)) {
210 struct bio *bio = bio_alloc(GFP_NOIO, 0);
211 struct flush_cmd *cmd, *next;
214 fcc->dispatch_list = llist_del_all(&fcc->issue_list);
215 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
217 bio->bi_bdev = sbi->sb->s_bdev;
218 ret = submit_bio_wait(WRITE_FLUSH, bio);
220 llist_for_each_entry_safe(cmd, next,
221 fcc->dispatch_list, llnode) {
223 complete(&cmd->wait);
226 fcc->dispatch_list = NULL;
229 wait_event_interruptible(*q,
230 kthread_should_stop() || !llist_empty(&fcc->issue_list));
234 int f2fs_issue_flush(struct f2fs_sb_info *sbi)
236 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
237 struct flush_cmd cmd;
239 trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
240 test_opt(sbi, FLUSH_MERGE));
242 if (test_opt(sbi, NOBARRIER))
245 if (!test_opt(sbi, FLUSH_MERGE))
246 return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);
248 init_completion(&cmd.wait);
250 llist_add(&cmd.llnode, &fcc->issue_list);
252 if (!fcc->dispatch_list)
253 wake_up(&fcc->flush_wait_queue);
255 wait_for_completion(&cmd.wait);
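/*
 * Note on the flush merge path above: each caller queues a flush_cmd on
 * fcc->issue_list (a lock-free llist) and sleeps on its completion, while a
 * single issue_flush_thread drains the whole list, submits one WRITE_FLUSH
 * bio for the batch, and then completes every waiter. llist_reverse_order()
 * restores submission (FIFO) order, since llist_del_all() returns the
 * entries newest-first.
 */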
260 int create_flush_cmd_control(struct f2fs_sb_info *sbi)
262 dev_t dev = sbi->sb->s_bdev->bd_dev;
263 struct flush_cmd_control *fcc;
266 fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
269 init_waitqueue_head(&fcc->flush_wait_queue);
270 init_llist_head(&fcc->issue_list);
271 SM_I(sbi)->cmd_control_info = fcc;
272 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
273 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
274 if (IS_ERR(fcc->f2fs_issue_flush)) {
275 err = PTR_ERR(fcc->f2fs_issue_flush);
277 SM_I(sbi)->cmd_control_info = NULL;
284 void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
286 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
288 if (fcc && fcc->f2fs_issue_flush)
289 kthread_stop(fcc->f2fs_issue_flush);
291 SM_I(sbi)->cmd_control_info = NULL;
294 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
295 enum dirty_type dirty_type)
297 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
299 /* need not be added */
300 if (IS_CURSEG(sbi, segno))
303 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
304 dirty_i->nr_dirty[dirty_type]++;
306 if (dirty_type == DIRTY) {
307 struct seg_entry *sentry = get_seg_entry(sbi, segno);
308 enum dirty_type t = sentry->type;
310 if (unlikely(t >= DIRTY)) {
314 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
315 dirty_i->nr_dirty[t]++;
319 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
320 enum dirty_type dirty_type)
322 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
324 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
325 dirty_i->nr_dirty[dirty_type]--;
327 if (dirty_type == DIRTY) {
328 struct seg_entry *sentry = get_seg_entry(sbi, segno);
329 enum dirty_type t = sentry->type;
331 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
332 dirty_i->nr_dirty[t]--;
334 if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
335 clear_bit(GET_SECNO(sbi, segno),
336 dirty_i->victim_secmap);
341 * Errors such as -ENOMEM should not occur here.
342 * Adding a dirty entry into the seglist is not a critical operation.
343 * If a given segment is one of the current working segments, it won't be added.
345 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
347 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
348 unsigned short valid_blocks;
350 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
353 mutex_lock(&dirty_i->seglist_lock);
355 valid_blocks = get_valid_blocks(sbi, segno, 0);
357 if (valid_blocks == 0) {
358 __locate_dirty_segment(sbi, segno, PRE);
359 __remove_dirty_segment(sbi, segno, DIRTY);
360 } else if (valid_blocks < sbi->blocks_per_seg) {
361 __locate_dirty_segment(sbi, segno, DIRTY);
363 /* Recovery routine with SSR needs this */
364 __remove_dirty_segment(sbi, segno, DIRTY);
367 mutex_unlock(&dirty_i->seglist_lock);
370 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
371 block_t blkstart, block_t blklen)
373 sector_t start = SECTOR_FROM_BLOCK(sbi, blkstart);
374 sector_t len = SECTOR_FROM_BLOCK(sbi, blklen);
375 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
376 return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
379 void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
381 if (f2fs_issue_discard(sbi, blkaddr, 1)) {
382 struct page *page = grab_meta_page(sbi, blkaddr);
383 /* zero-filled page */
384 set_page_dirty(page);
385 f2fs_put_page(page, 1);
389 static void add_discard_addrs(struct f2fs_sb_info *sbi,
390 unsigned int segno, struct seg_entry *se)
392 struct list_head *head = &SM_I(sbi)->discard_list;
393 struct discard_entry *new;
394 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
395 int max_blocks = sbi->blocks_per_seg;
396 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
397 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
398 unsigned long dmap[entries];
399 unsigned int start = 0, end = -1;
402 if (!test_opt(sbi, DISCARD))
405 /* zero block will be discarded through the prefree list */
406 if (!se->valid_blocks || se->valid_blocks == max_blocks)
409 /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
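/*
 * A block is a discard candidate when it was valid at the last checkpoint
 * (ckpt_valid_map bit set) but has been invalidated since (cur_valid_map
 * bit clear); (cur ^ ckpt) & ckpt below selects exactly those bits.
 */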
410 for (i = 0; i < entries; i++)
411 dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
413 while (SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
414 start = __find_rev_next_bit(dmap, max_blocks, end + 1);
415 if (start >= max_blocks)
418 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
420 new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
421 INIT_LIST_HEAD(&new->list);
422 new->blkaddr = START_BLOCK(sbi, segno) + start;
423 new->len = end - start;
425 list_add_tail(&new->list, head);
426 SM_I(sbi)->nr_discards += end - start;
431 * clear_prefree_segments should be called after the checkpoint is done.
433 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
435 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
437 unsigned int total_segs = TOTAL_SEGS(sbi);
439 mutex_lock(&dirty_i->seglist_lock);
440 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], total_segs)
441 __set_test_and_free(sbi, segno);
442 mutex_unlock(&dirty_i->seglist_lock);
445 void clear_prefree_segments(struct f2fs_sb_info *sbi)
447 struct list_head *head = &(SM_I(sbi)->discard_list);
448 struct discard_entry *entry, *this;
449 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
450 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
451 unsigned int total_segs = TOTAL_SEGS(sbi);
452 unsigned int start = 0, end = -1;
454 mutex_lock(&dirty_i->seglist_lock);
458 start = find_next_bit(prefree_map, total_segs, end + 1);
459 if (start >= total_segs)
461 end = find_next_zero_bit(prefree_map, total_segs, start + 1);
463 for (i = start; i < end; i++)
464 clear_bit(i, prefree_map);
466 dirty_i->nr_dirty[PRE] -= end - start;
468 if (!test_opt(sbi, DISCARD))
471 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
472 (end - start) << sbi->log_blocks_per_seg);
474 mutex_unlock(&dirty_i->seglist_lock);
476 /* send small discards */
477 list_for_each_entry_safe(entry, this, head, list) {
478 f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
479 list_del(&entry->list);
480 SM_I(sbi)->nr_discards -= entry->len;
481 kmem_cache_free(discard_entry_slab, entry);
485 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
487 struct sit_info *sit_i = SIT_I(sbi);
489 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
490 sit_i->dirty_sentries++;
497 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
498 unsigned int segno, int modified)
500 struct seg_entry *se = get_seg_entry(sbi, segno);
503 __mark_sit_entry_dirty(sbi, segno);
506 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
508 struct seg_entry *se;
509 unsigned int segno, offset;
510 long int new_vblocks;
512 segno = GET_SEGNO(sbi, blkaddr);
514 se = get_seg_entry(sbi, segno);
515 new_vblocks = se->valid_blocks + del;
516 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
518 f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
519 (new_vblocks > sbi->blocks_per_seg)));
521 se->valid_blocks = new_vblocks;
522 se->mtime = get_mtime(sbi);
523 SIT_I(sbi)->max_mtime = se->mtime;
525 /* Update valid block bitmap */
527 if (f2fs_set_bit(offset, se->cur_valid_map))
530 if (!f2fs_clear_bit(offset, se->cur_valid_map))
533 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
534 se->ckpt_valid_blocks += del;
536 __mark_sit_entry_dirty(sbi, segno);
538 /* update total number of valid blocks to be written in ckpt area */
539 SIT_I(sbi)->written_valid_blocks += del;
541 if (sbi->segs_per_sec > 1)
542 get_sec_entry(sbi, segno)->valid_blocks += del;
545 void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
547 update_sit_entry(sbi, new, 1);
548 if (GET_SEGNO(sbi, old) != NULL_SEGNO)
549 update_sit_entry(sbi, old, -1);
551 locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
552 locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
555 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
557 unsigned int segno = GET_SEGNO(sbi, addr);
558 struct sit_info *sit_i = SIT_I(sbi);
560 f2fs_bug_on(sbi, addr == NULL_ADDR);
561 if (addr == NEW_ADDR)
564 /* add it into sit main buffer */
565 mutex_lock(&sit_i->sentry_lock);
567 update_sit_entry(sbi, addr, -1);
569 /* add it into dirty seglist */
570 locate_dirty_segment(sbi, segno);
572 mutex_unlock(&sit_i->sentry_lock);
576 * This function should be called while holding the curseg_mutex lock
578 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
579 struct f2fs_summary *sum)
581 struct curseg_info *curseg = CURSEG_I(sbi, type);
582 void *addr = curseg->sum_blk;
583 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
584 memcpy(addr, sum, sizeof(struct f2fs_summary));
588 * Calculate the number of current summary pages for writing
590 int npages_for_summary_flush(struct f2fs_sb_info *sbi)
592 int valid_sum_count = 0;
595 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
596 if (sbi->ckpt->alloc_type[i] == SSR)
597 valid_sum_count += sbi->blocks_per_seg;
599 valid_sum_count += curseg_blkoff(sbi, i);
602 sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
603 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
604 if (valid_sum_count <= sum_in_page)
606 else if ((valid_sum_count - sum_in_page) <=
607 (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
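/*
 * Rough capacity illustration, assuming 4KB blocks, 7-byte summary entries,
 * a 5-byte summary footer and two 253-byte journal areas (see f2fs.h for
 * the authoritative sizes): the first compacted page holds
 * (4096 - 2 * 253 - 5) / 7 = 512 entries and a continuation page holds
 * (4096 - 5) / 7 = 584, so one, two or three pages cover the worst case of
 * 3 * 512 data summary entries.
 */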
613 * Caller should put this summary page
615 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
617 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
620 static void write_sum_page(struct f2fs_sb_info *sbi,
621 struct f2fs_summary_block *sum_blk, block_t blk_addr)
623 struct page *page = grab_meta_page(sbi, blk_addr);
624 void *kaddr = page_address(page);
625 memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
626 set_page_dirty(page);
627 f2fs_put_page(page, 1);
630 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
632 struct curseg_info *curseg = CURSEG_I(sbi, type);
633 unsigned int segno = curseg->segno + 1;
634 struct free_segmap_info *free_i = FREE_I(sbi);
636 if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
637 return !test_bit(segno, free_i->free_segmap);
642 * Find a new segment from the free segments bitmap in the requested order
643 * (ALLOC_LEFT or ALLOC_RIGHT). This function must return with success, otherwise BUG
645 static void get_new_segment(struct f2fs_sb_info *sbi,
646 unsigned int *newseg, bool new_sec, int dir)
648 struct free_segmap_info *free_i = FREE_I(sbi);
649 unsigned int segno, secno, zoneno;
650 unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
651 unsigned int hint = *newseg / sbi->segs_per_sec;
652 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
653 unsigned int left_start = hint;
658 write_lock(&free_i->segmap_lock);
660 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
661 segno = find_next_zero_bit(free_i->free_segmap,
662 TOTAL_SEGS(sbi), *newseg + 1);
663 if (segno - *newseg < sbi->segs_per_sec -
664 (*newseg % sbi->segs_per_sec))
668 secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
669 if (secno >= TOTAL_SECS(sbi)) {
670 if (dir == ALLOC_RIGHT) {
671 secno = find_next_zero_bit(free_i->free_secmap,
673 f2fs_bug_on(sbi, secno >= TOTAL_SECS(sbi));
676 left_start = hint - 1;
682 while (test_bit(left_start, free_i->free_secmap)) {
683 if (left_start > 0) {
687 left_start = find_next_zero_bit(free_i->free_secmap,
689 f2fs_bug_on(sbi, left_start >= TOTAL_SECS(sbi));
695 segno = secno * sbi->segs_per_sec;
696 zoneno = secno / sbi->secs_per_zone;
698 /* give up on finding another zone */
701 if (sbi->secs_per_zone == 1)
703 if (zoneno == old_zoneno)
705 if (dir == ALLOC_LEFT) {
706 if (!go_left && zoneno + 1 >= total_zones)
708 if (go_left && zoneno == 0)
711 for (i = 0; i < NR_CURSEG_TYPE; i++)
712 if (CURSEG_I(sbi, i)->zone == zoneno)
715 if (i < NR_CURSEG_TYPE) {
716 /* zone is in use, try another */
718 hint = zoneno * sbi->secs_per_zone - 1;
719 else if (zoneno + 1 >= total_zones)
722 hint = (zoneno + 1) * sbi->secs_per_zone;
724 goto find_other_zone;
727 /* set it as dirty segment in free segmap */
728 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
729 __set_inuse(sbi, segno);
731 write_unlock(&free_i->segmap_lock);
734 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
736 struct curseg_info *curseg = CURSEG_I(sbi, type);
737 struct summary_footer *sum_footer;
739 curseg->segno = curseg->next_segno;
740 curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
741 curseg->next_blkoff = 0;
742 curseg->next_segno = NULL_SEGNO;
744 sum_footer = &(curseg->sum_blk->footer);
745 memset(sum_footer, 0, sizeof(struct summary_footer));
746 if (IS_DATASEG(type))
747 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
748 if (IS_NODESEG(type))
749 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
750 __set_sit_entry_type(sbi, type, curseg->segno, modified);
754 * Allocate a current working segment.
755 * This function always allocates a free segment in LFS manner.
757 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
759 struct curseg_info *curseg = CURSEG_I(sbi, type);
760 unsigned int segno = curseg->segno;
761 int dir = ALLOC_LEFT;
763 write_sum_page(sbi, curseg->sum_blk,
764 GET_SUM_BLOCK(sbi, segno));
765 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
768 if (test_opt(sbi, NOHEAP))
771 get_new_segment(sbi, &segno, new_sec, dir);
772 curseg->next_segno = segno;
773 reset_curseg(sbi, type, 1);
774 curseg->alloc_type = LFS;
777 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
778 struct curseg_info *seg, block_t start)
780 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
781 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
782 unsigned long target_map[entries];
783 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
784 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
787 for (i = 0; i < entries; i++)
788 target_map[i] = ckpt_map[i] | cur_map[i];
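/*
 * target_map now has a bit set wherever the block is in use either in the
 * current bitmap or in the last checkpoint's bitmap; only offsets clear in
 * both are reusable by SSR, so the zero-bit search below skips everything
 * else.
 */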
790 pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
792 seg->next_blkoff = pos;
796 * If a segment is written in LFS manner, the next block offset is simply
797 * obtained by increasing the current block offset. However, if a segment is
798 * written in SSR manner, the next block offset is obtained by calling __next_free_blkoff
800 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
801 struct curseg_info *seg)
803 if (seg->alloc_type == SSR)
804 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
810 * This function always allocates a used segment (from the dirty seglist) in SSR
811 * manner, so it has to recover the existing segment information of valid blocks
813 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
815 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
816 struct curseg_info *curseg = CURSEG_I(sbi, type);
817 unsigned int new_segno = curseg->next_segno;
818 struct f2fs_summary_block *sum_node;
819 struct page *sum_page;
821 write_sum_page(sbi, curseg->sum_blk,
822 GET_SUM_BLOCK(sbi, curseg->segno));
823 __set_test_and_inuse(sbi, new_segno);
825 mutex_lock(&dirty_i->seglist_lock);
826 __remove_dirty_segment(sbi, new_segno, PRE);
827 __remove_dirty_segment(sbi, new_segno, DIRTY);
828 mutex_unlock(&dirty_i->seglist_lock);
830 reset_curseg(sbi, type, 1);
831 curseg->alloc_type = SSR;
832 __next_free_blkoff(sbi, curseg, 0);
835 sum_page = get_sum_page(sbi, new_segno);
836 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
837 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
838 f2fs_put_page(sum_page, 1);
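/*
 * Summary of the SSR switch above: the old curseg's summary block is written
 * back, the victim segment is marked in-use and dropped from the dirty lists,
 * next_blkoff is recomputed from the merged valid-block maps, and the
 * victim's on-disk summary block is loaded so later writes keep appending
 * summary entries to it.
 */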
842 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
844 struct curseg_info *curseg = CURSEG_I(sbi, type);
845 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
847 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
848 return v_ops->get_victim(sbi,
849 &(curseg)->next_segno, BG_GC, type, SSR);
851 /* For data segments, let's do SSR more intensively */
852 for (; type >= CURSEG_HOT_DATA; type--)
853 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
860 * flush out the current segment and replace it with a new segment
861 * This function must return with success, otherwise it is a BUG
863 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
864 int type, bool force)
866 struct curseg_info *curseg = CURSEG_I(sbi, type);
869 new_curseg(sbi, type, true);
870 else if (type == CURSEG_WARM_NODE)
871 new_curseg(sbi, type, false);
872 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
873 new_curseg(sbi, type, false);
874 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
875 change_curseg(sbi, type, true);
877 new_curseg(sbi, type, false);
879 stat_inc_seg_type(sbi, curseg);
882 void allocate_new_segments(struct f2fs_sb_info *sbi)
884 struct curseg_info *curseg;
885 unsigned int old_curseg;
888 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
889 curseg = CURSEG_I(sbi, i);
890 old_curseg = curseg->segno;
891 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
892 locate_dirty_segment(sbi, old_curseg);
896 static const struct segment_allocation default_salloc_ops = {
897 .allocate_segment = allocate_segment_by_default,
900 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
902 struct curseg_info *curseg = CURSEG_I(sbi, type);
903 if (curseg->next_blkoff < sbi->blocks_per_seg)
908 static int __get_segment_type_2(struct page *page, enum page_type p_type)
911 return CURSEG_HOT_DATA;
913 return CURSEG_HOT_NODE;
916 static int __get_segment_type_4(struct page *page, enum page_type p_type)
918 if (p_type == DATA) {
919 struct inode *inode = page->mapping->host;
921 if (S_ISDIR(inode->i_mode))
922 return CURSEG_HOT_DATA;
924 return CURSEG_COLD_DATA;
926 if (IS_DNODE(page) && !is_cold_node(page))
927 return CURSEG_HOT_NODE;
929 return CURSEG_COLD_NODE;
933 static int __get_segment_type_6(struct page *page, enum page_type p_type)
935 if (p_type == DATA) {
936 struct inode *inode = page->mapping->host;
938 if (S_ISDIR(inode->i_mode))
939 return CURSEG_HOT_DATA;
940 else if (is_cold_data(page) || file_is_cold(inode))
941 return CURSEG_COLD_DATA;
943 return CURSEG_WARM_DATA;
946 return is_cold_node(page) ? CURSEG_WARM_NODE :
949 return CURSEG_COLD_NODE;
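/*
 * With the default six logs (see __get_segment_type_6 above): directory data
 * goes to the hot data log, cold or cold-hinted file data to the cold data
 * log, and other file data to the warm data log; direct node blocks go to
 * the hot node log (warm if they belong to cold files) and indirect node
 * blocks to the cold node log.
 */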
953 static int __get_segment_type(struct page *page, enum page_type p_type)
955 switch (F2FS_P_SB(page)->active_logs) {
957 return __get_segment_type_2(page, p_type);
959 return __get_segment_type_4(page, p_type);
961 /* NR_CURSEG_TYPE(6) logs by default */
962 f2fs_bug_on(F2FS_P_SB(page),
963 F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
964 return __get_segment_type_6(page, p_type);
967 void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
968 block_t old_blkaddr, block_t *new_blkaddr,
969 struct f2fs_summary *sum, int type)
971 struct sit_info *sit_i = SIT_I(sbi);
972 struct curseg_info *curseg;
974 curseg = CURSEG_I(sbi, type);
976 mutex_lock(&curseg->curseg_mutex);
978 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
981 * __add_sum_entry should be called while holding the curseg_mutex,
982 * because this function updates a summary entry in the
983 * current summary block.
985 __add_sum_entry(sbi, type, sum);
987 mutex_lock(&sit_i->sentry_lock);
988 __refresh_next_blkoff(sbi, curseg);
990 stat_inc_block_count(sbi, curseg);
992 if (!__has_curseg_space(sbi, type))
993 sit_i->s_ops->allocate_segment(sbi, type, false);
995 * SIT information should be updated before segment allocation,
996 * since SSR needs latest valid block information.
998 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
1000 mutex_unlock(&sit_i->sentry_lock);
1002 if (page && IS_NODESEG(type))
1003 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
1005 mutex_unlock(&curseg->curseg_mutex);
1008 static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
1009 block_t old_blkaddr, block_t *new_blkaddr,
1010 struct f2fs_summary *sum, struct f2fs_io_info *fio)
1012 int type = __get_segment_type(page, fio->type);
1014 allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type);
1016 /* writeout dirty page into bdev */
1017 f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio);
1020 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
1022 struct f2fs_io_info fio = {
1024 .rw = WRITE_SYNC | REQ_META | REQ_PRIO
1027 set_page_writeback(page);
1028 f2fs_submit_page_mbio(sbi, page, page->index, &fio);
1031 void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
1032 struct f2fs_io_info *fio,
1033 unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
1035 struct f2fs_summary sum;
1036 set_summary(&sum, nid, 0, 0);
1037 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio);
1040 void write_data_page(struct page *page, struct dnode_of_data *dn,
1041 block_t *new_blkaddr, struct f2fs_io_info *fio)
1043 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1044 struct f2fs_summary sum;
1045 struct node_info ni;
1047 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
1048 get_node_info(sbi, dn->nid, &ni);
1049 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1051 do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio);
1054 void rewrite_data_page(struct page *page, block_t old_blkaddr,
1055 struct f2fs_io_info *fio)
1057 f2fs_submit_page_mbio(F2FS_P_SB(page), page, old_blkaddr, fio);
1060 void recover_data_page(struct f2fs_sb_info *sbi,
1061 struct page *page, struct f2fs_summary *sum,
1062 block_t old_blkaddr, block_t new_blkaddr)
1064 struct sit_info *sit_i = SIT_I(sbi);
1065 struct curseg_info *curseg;
1066 unsigned int segno, old_cursegno;
1067 struct seg_entry *se;
1070 segno = GET_SEGNO(sbi, new_blkaddr);
1071 se = get_seg_entry(sbi, segno);
1074 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
1075 if (old_blkaddr == NULL_ADDR)
1076 type = CURSEG_COLD_DATA;
1078 type = CURSEG_WARM_DATA;
1080 curseg = CURSEG_I(sbi, type);
1082 mutex_lock(&curseg->curseg_mutex);
1083 mutex_lock(&sit_i->sentry_lock);
1085 old_cursegno = curseg->segno;
1087 /* change the current segment */
1088 if (segno != curseg->segno) {
1089 curseg->next_segno = segno;
1090 change_curseg(sbi, type, true);
1093 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
1094 __add_sum_entry(sbi, type, sum);
1096 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
1097 locate_dirty_segment(sbi, old_cursegno);
1099 mutex_unlock(&sit_i->sentry_lock);
1100 mutex_unlock(&curseg->curseg_mutex);
1103 static inline bool is_merged_page(struct f2fs_sb_info *sbi,
1104 struct page *page, enum page_type type)
1106 enum page_type btype = PAGE_TYPE_OF_BIO(type);
1107 struct f2fs_bio_info *io = &sbi->write_io[btype];
1108 struct bio_vec *bvec;
1111 down_read(&io->io_rwsem);
1115 bio_for_each_segment_all(bvec, io->bio, i) {
1116 if (page == bvec->bv_page) {
1117 up_read(&io->io_rwsem);
1123 up_read(&io->io_rwsem);
1127 void f2fs_wait_on_page_writeback(struct page *page,
1128 enum page_type type)
1130 if (PageWriteback(page)) {
1131 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1133 if (is_merged_page(sbi, page, type))
1134 f2fs_submit_merged_bio(sbi, type, WRITE);
1135 wait_on_page_writeback(page);
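/*
 * Note on f2fs_wait_on_page_writeback() above: a page under writeback may
 * still sit in the per-type merged write bio that has not been submitted
 * yet; is_merged_page() detects that case and f2fs_submit_merged_bio()
 * forces the bio out first, so wait_on_page_writeback() cannot stall on a
 * bio that nobody has submitted.
 */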
1139 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1141 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1142 struct curseg_info *seg_i;
1143 unsigned char *kaddr;
1148 start = start_sum_block(sbi);
1150 page = get_meta_page(sbi, start++);
1151 kaddr = (unsigned char *)page_address(page);
1153 /* Step 1: restore nat cache */
1154 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1155 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1157 /* Step 2: restore sit cache */
1158 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1159 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1161 offset = 2 * SUM_JOURNAL_SIZE;
1163 /* Step 3: restore summary entries */
1164 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1165 unsigned short blk_off;
1168 seg_i = CURSEG_I(sbi, i);
1169 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1170 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1171 seg_i->next_segno = segno;
1172 reset_curseg(sbi, i, 0);
1173 seg_i->alloc_type = ckpt->alloc_type[i];
1174 seg_i->next_blkoff = blk_off;
1176 if (seg_i->alloc_type == SSR)
1177 blk_off = sbi->blocks_per_seg;
1179 for (j = 0; j < blk_off; j++) {
1180 struct f2fs_summary *s;
1181 s = (struct f2fs_summary *)(kaddr + offset);
1182 seg_i->sum_blk->entries[j] = *s;
1183 offset += SUMMARY_SIZE;
1184 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1188 f2fs_put_page(page, 1);
1191 page = get_meta_page(sbi, start++);
1192 kaddr = (unsigned char *)page_address(page);
1196 f2fs_put_page(page, 1);
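/*
 * Layout of the compacted summaries read above (one or more meta pages
 * starting at start_sum_block()): the NAT journal, then the SIT journal,
 * then the summary entries of the three data logs packed back to back,
 * spilling into the following pages whenever a page fills up.
 */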
1200 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1202 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1203 struct f2fs_summary_block *sum;
1204 struct curseg_info *curseg;
1206 unsigned short blk_off;
1207 unsigned int segno = 0;
1208 block_t blk_addr = 0;
1210 /* get segment number and block addr */
1211 if (IS_DATASEG(type)) {
1212 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1213 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1215 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1216 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1218 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1220 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1222 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1224 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1225 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1226 type - CURSEG_HOT_NODE);
1228 blk_addr = GET_SUM_BLOCK(sbi, segno);
1231 new = get_meta_page(sbi, blk_addr);
1232 sum = (struct f2fs_summary_block *)page_address(new);
1234 if (IS_NODESEG(type)) {
1235 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
1236 struct f2fs_summary *ns = &sum->entries[0];
1238 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1240 ns->ofs_in_node = 0;
1245 err = restore_node_summary(sbi, segno, sum);
1247 f2fs_put_page(new, 1);
1253 /* set uncompleted segment to curseg */
1254 curseg = CURSEG_I(sbi, type);
1255 mutex_lock(&curseg->curseg_mutex);
1256 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1257 curseg->next_segno = segno;
1258 reset_curseg(sbi, type, 0);
1259 curseg->alloc_type = ckpt->alloc_type[type];
1260 curseg->next_blkoff = blk_off;
1261 mutex_unlock(&curseg->curseg_mutex);
1262 f2fs_put_page(new, 1);
1266 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1268 int type = CURSEG_HOT_DATA;
1271 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1272 /* restore for compacted data summary */
1273 if (read_compacted_summaries(sbi))
1275 type = CURSEG_HOT_NODE;
1278 for (; type <= CURSEG_COLD_NODE; type++) {
1279 err = read_normal_summaries(sbi, type);
1287 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1290 unsigned char *kaddr;
1291 struct f2fs_summary *summary;
1292 struct curseg_info *seg_i;
1293 int written_size = 0;
1296 page = grab_meta_page(sbi, blkaddr++);
1297 kaddr = (unsigned char *)page_address(page);
1299 /* Step 1: write nat cache */
1300 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1301 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1302 written_size += SUM_JOURNAL_SIZE;
1304 /* Step 2: write sit cache */
1305 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1306 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1308 written_size += SUM_JOURNAL_SIZE;
1310 /* Step 3: write summary entries */
1311 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1312 unsigned short blkoff;
1313 seg_i = CURSEG_I(sbi, i);
1314 if (sbi->ckpt->alloc_type[i] == SSR)
1315 blkoff = sbi->blocks_per_seg;
1317 blkoff = curseg_blkoff(sbi, i);
1319 for (j = 0; j < blkoff; j++) {
1321 page = grab_meta_page(sbi, blkaddr++);
1322 kaddr = (unsigned char *)page_address(page);
1325 summary = (struct f2fs_summary *)(kaddr + written_size);
1326 *summary = seg_i->sum_blk->entries[j];
1327 written_size += SUMMARY_SIZE;
1329 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1333 set_page_dirty(page);
1334 f2fs_put_page(page, 1);
1339 set_page_dirty(page);
1340 f2fs_put_page(page, 1);
1344 static void write_normal_summaries(struct f2fs_sb_info *sbi,
1345 block_t blkaddr, int type)
1348 if (IS_DATASEG(type))
1349 end = type + NR_CURSEG_DATA_TYPE;
1351 end = type + NR_CURSEG_NODE_TYPE;
1353 for (i = type; i < end; i++) {
1354 struct curseg_info *sum = CURSEG_I(sbi, i);
1355 mutex_lock(&sum->curseg_mutex);
1356 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1357 mutex_unlock(&sum->curseg_mutex);
1361 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1363 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
1364 write_compacted_summaries(sbi, start_blk);
1366 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1369 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1371 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
1372 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
1375 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1376 unsigned int val, int alloc)
1380 if (type == NAT_JOURNAL) {
1381 for (i = 0; i < nats_in_cursum(sum); i++) {
1382 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1385 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1386 return update_nats_in_cursum(sum, 1);
1387 } else if (type == SIT_JOURNAL) {
1388 for (i = 0; i < sits_in_cursum(sum); i++)
1389 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1391 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1392 return update_sits_in_cursum(sum, 1);
1397 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1400 struct sit_info *sit_i = SIT_I(sbi);
1401 unsigned int offset = SIT_BLOCK_OFFSET(segno);
1402 block_t blk_addr = sit_i->sit_base_addr + offset;
1404 check_seg_range(sbi, segno);
1406 /* calculate sit block address */
1407 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1408 blk_addr += sit_i->sit_blocks;
1410 return get_meta_page(sbi, blk_addr);
1413 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1416 struct sit_info *sit_i = SIT_I(sbi);
1417 struct page *src_page, *dst_page;
1418 pgoff_t src_off, dst_off;
1419 void *src_addr, *dst_addr;
1421 src_off = current_sit_addr(sbi, start);
1422 dst_off = next_sit_addr(sbi, src_off);
1424 /* get current sit block page without lock */
1425 src_page = get_meta_page(sbi, src_off);
1426 dst_page = grab_meta_page(sbi, dst_off);
1427 f2fs_bug_on(sbi, PageDirty(src_page));
1429 src_addr = page_address(src_page);
1430 dst_addr = page_address(dst_page);
1431 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1433 set_page_dirty(dst_page);
1434 f2fs_put_page(src_page, 1);
1436 set_to_next_sit(sit_i, start);
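/*
 * The SIT area keeps two copies of every SIT block: sit_i->sit_bitmap
 * records which copy is currently valid (see get_current_sit_page() above),
 * and get_next_sit_page() copies the live block into the other copy and
 * toggles the per-block bit via set_to_next_sit(), so a checkpoint never
 * overwrites the copy the previous checkpoint still depends on.
 */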
1441 static struct sit_entry_set *grab_sit_entry_set(void)
1443 struct sit_entry_set *ses =
1444 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
1447 INIT_LIST_HEAD(&ses->set_list);
1451 static void release_sit_entry_set(struct sit_entry_set *ses)
1453 list_del(&ses->set_list);
1454 kmem_cache_free(sit_entry_set_slab, ses);
1457 static void adjust_sit_entry_set(struct sit_entry_set *ses,
1458 struct list_head *head)
1460 struct sit_entry_set *next = ses;
1462 if (list_is_last(&ses->set_list, head))
1465 list_for_each_entry_continue(next, head, set_list)
1466 if (ses->entry_cnt <= next->entry_cnt)
1469 list_move_tail(&ses->set_list, &next->set_list);
1472 static void add_sit_entry(unsigned int segno, struct list_head *head)
1474 struct sit_entry_set *ses;
1475 unsigned int start_segno = START_SEGNO(segno);
1477 list_for_each_entry(ses, head, set_list) {
1478 if (ses->start_segno == start_segno) {
1480 adjust_sit_entry_set(ses, head);
1485 ses = grab_sit_entry_set();
1487 ses->start_segno = start_segno;
1489 list_add(&ses->set_list, head);
1492 static void add_sits_in_set(struct f2fs_sb_info *sbi)
1494 struct f2fs_sm_info *sm_info = SM_I(sbi);
1495 struct list_head *set_list = &sm_info->sit_entry_set;
1496 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
1497 unsigned long nsegs = TOTAL_SEGS(sbi);
1500 for_each_set_bit(segno, bitmap, nsegs)
1501 add_sit_entry(segno, set_list);
1504 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
1506 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1507 struct f2fs_summary_block *sum = curseg->sum_blk;
1510 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1514 segno = le32_to_cpu(segno_in_journal(sum, i));
1515 dirtied = __mark_sit_entry_dirty(sbi, segno);
1518 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
1520 update_sits_in_cursum(sum, -sits_in_cursum(sum));
1524 * CP calls this function, which flushes SIT entries including sit_journal,
1525 * and moves prefree segs to free segs.
1527 void flush_sit_entries(struct f2fs_sb_info *sbi)
1529 struct sit_info *sit_i = SIT_I(sbi);
1530 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1531 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1532 struct f2fs_summary_block *sum = curseg->sum_blk;
1533 struct sit_entry_set *ses, *tmp;
1534 struct list_head *head = &SM_I(sbi)->sit_entry_set;
1535 unsigned long nsegs = TOTAL_SEGS(sbi);
1536 bool to_journal = true;
1538 mutex_lock(&curseg->curseg_mutex);
1539 mutex_lock(&sit_i->sentry_lock);
1542 * add and account sit entries of dirty bitmap in sit entry sets
1545 add_sits_in_set(sbi);
1548 * if there is not enough space in the journal to store dirty sit
1549 * entries, remove all entries from the journal and add and account
1550 * them in sit entry sets.
1552 if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
1553 remove_sits_in_journal(sbi);
1555 if (!sit_i->dirty_sentries)
1559 * there are two steps to flush sit entries:
1560 * #1, flush sit entries to journal in current cold data summary block.
1561 * #2, flush sit entries to sit page.
1563 list_for_each_entry_safe(ses, tmp, head, set_list) {
1565 struct f2fs_sit_block *raw_sit = NULL;
1566 unsigned int start_segno = ses->start_segno;
1567 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
1569 unsigned int segno = start_segno;
1572 !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
1576 page = get_next_sit_page(sbi, start_segno);
1577 raw_sit = page_address(page);
1580 /* flush dirty sit entries in region of current sit set */
1581 for_each_set_bit_from(segno, bitmap, end) {
1582 int offset, sit_offset;
1583 struct seg_entry *se = get_seg_entry(sbi, segno);
1585 /* add discard candidates */
1586 if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards)
1587 add_discard_addrs(sbi, segno, se);
1590 offset = lookup_journal_in_cursum(sum,
1591 SIT_JOURNAL, segno, 1);
1592 f2fs_bug_on(sbi, offset < 0);
1593 segno_in_journal(sum, offset) =
1595 seg_info_to_raw_sit(se,
1596 &sit_in_journal(sum, offset));
1598 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1599 seg_info_to_raw_sit(se,
1600 &raw_sit->entries[sit_offset]);
1603 __clear_bit(segno, bitmap);
1604 sit_i->dirty_sentries--;
1609 f2fs_put_page(page, 1);
1611 f2fs_bug_on(sbi, ses->entry_cnt);
1612 release_sit_entry_set(ses);
1615 f2fs_bug_on(sbi, !list_empty(head));
1616 f2fs_bug_on(sbi, sit_i->dirty_sentries);
1619 mutex_unlock(&sit_i->sentry_lock);
1620 mutex_unlock(&curseg->curseg_mutex);
1622 set_prefree_as_free_segments(sbi);
1625 static int build_sit_info(struct f2fs_sb_info *sbi)
1627 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1628 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1629 struct sit_info *sit_i;
1630 unsigned int sit_segs, start;
1631 char *src_bitmap, *dst_bitmap;
1632 unsigned int bitmap_size;
1634 /* allocate memory for SIT information */
1635 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1639 SM_I(sbi)->sit_info = sit_i;
1641 sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
1642 if (!sit_i->sentries)
1645 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1646 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1647 if (!sit_i->dirty_sentries_bitmap)
1650 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1651 sit_i->sentries[start].cur_valid_map
1652 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1653 sit_i->sentries[start].ckpt_valid_map
1654 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1655 if (!sit_i->sentries[start].cur_valid_map
1656 || !sit_i->sentries[start].ckpt_valid_map)
1660 if (sbi->segs_per_sec > 1) {
1661 sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
1662 sizeof(struct sec_entry));
1663 if (!sit_i->sec_entries)
1667 /* get information related with SIT */
1668 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1670 /* setup SIT bitmap from checkpoint pack */
1671 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1672 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1674 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1678 /* init SIT information */
1679 sit_i->s_ops = &default_salloc_ops;
1681 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1682 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1683 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1684 sit_i->sit_bitmap = dst_bitmap;
1685 sit_i->bitmap_size = bitmap_size;
1686 sit_i->dirty_sentries = 0;
1687 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1688 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1689 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1690 mutex_init(&sit_i->sentry_lock);
1694 static int build_free_segmap(struct f2fs_sb_info *sbi)
1696 struct f2fs_sm_info *sm_info = SM_I(sbi);
1697 struct free_segmap_info *free_i;
1698 unsigned int bitmap_size, sec_bitmap_size;
1700 /* allocate memory for free segmap information */
1701 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1705 SM_I(sbi)->free_info = free_i;
1707 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1708 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1709 if (!free_i->free_segmap)
1712 sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1713 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1714 if (!free_i->free_secmap)
1717 /* set all segments as dirty temporarily */
1718 memset(free_i->free_segmap, 0xff, bitmap_size);
1719 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1721 /* init free segmap information */
1722 free_i->start_segno =
1723 (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
1724 free_i->free_segments = 0;
1725 free_i->free_sections = 0;
1726 rwlock_init(&free_i->segmap_lock);
1730 static int build_curseg(struct f2fs_sb_info *sbi)
1732 struct curseg_info *array;
1735 array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
1739 SM_I(sbi)->curseg_array = array;
1741 for (i = 0; i < NR_CURSEG_TYPE; i++) {
1742 mutex_init(&array[i].curseg_mutex);
1743 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1744 if (!array[i].sum_blk)
1746 array[i].segno = NULL_SEGNO;
1747 array[i].next_blkoff = 0;
1749 return restore_curseg_summaries(sbi);
1752 static void build_sit_entries(struct f2fs_sb_info *sbi)
1754 struct sit_info *sit_i = SIT_I(sbi);
1755 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1756 struct f2fs_summary_block *sum = curseg->sum_blk;
1757 int sit_blk_cnt = SIT_BLK_CNT(sbi);
1758 unsigned int i, start, end;
1759 unsigned int readed, start_blk = 0;
1760 int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
1763 readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
1765 start = start_blk * sit_i->sents_per_block;
1766 end = (start_blk + readed) * sit_i->sents_per_block;
1768 for (; start < end && start < TOTAL_SEGS(sbi); start++) {
1769 struct seg_entry *se = &sit_i->sentries[start];
1770 struct f2fs_sit_block *sit_blk;
1771 struct f2fs_sit_entry sit;
1774 mutex_lock(&curseg->curseg_mutex);
1775 for (i = 0; i < sits_in_cursum(sum); i++) {
1776 if (le32_to_cpu(segno_in_journal(sum, i))
1778 sit = sit_in_journal(sum, i);
1779 mutex_unlock(&curseg->curseg_mutex);
1783 mutex_unlock(&curseg->curseg_mutex);
1785 page = get_current_sit_page(sbi, start);
1786 sit_blk = (struct f2fs_sit_block *)page_address(page);
1787 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1788 f2fs_put_page(page, 1);
1790 check_block_count(sbi, start, &sit);
1791 seg_info_from_raw_sit(se, &sit);
1792 if (sbi->segs_per_sec > 1) {
1793 struct sec_entry *e = get_sec_entry(sbi, start);
1794 e->valid_blocks += se->valid_blocks;
1797 start_blk += readed;
1798 } while (start_blk < sit_blk_cnt);
1801 static void init_free_segmap(struct f2fs_sb_info *sbi)
1806 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1807 struct seg_entry *sentry = get_seg_entry(sbi, start);
1808 if (!sentry->valid_blocks)
1809 __set_free(sbi, start);
1812 /* set the current segments as in use */
1813 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
1814 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
1815 __set_test_and_inuse(sbi, curseg_t->segno);
1819 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
1821 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1822 struct free_segmap_info *free_i = FREE_I(sbi);
1823 unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
1824 unsigned short valid_blocks;
1827 /* find dirty segment based on free segmap */
1828 segno = find_next_inuse(free_i, total_segs, offset);
1829 if (segno >= total_segs)
1832 valid_blocks = get_valid_blocks(sbi, segno, 0);
1833 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
1835 if (valid_blocks > sbi->blocks_per_seg) {
1836 f2fs_bug_on(sbi, 1);
1839 mutex_lock(&dirty_i->seglist_lock);
1840 __locate_dirty_segment(sbi, segno, DIRTY);
1841 mutex_unlock(&dirty_i->seglist_lock);
1845 static int init_victim_secmap(struct f2fs_sb_info *sbi)
1847 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1848 unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1850 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
1851 if (!dirty_i->victim_secmap)
1856 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
1858 struct dirty_seglist_info *dirty_i;
1859 unsigned int bitmap_size, i;
1861 /* allocate memory for dirty segments list information */
1862 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
1866 SM_I(sbi)->dirty_info = dirty_i;
1867 mutex_init(&dirty_i->seglist_lock);
1869 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1871 for (i = 0; i < NR_DIRTY_TYPE; i++) {
1872 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
1873 if (!dirty_i->dirty_segmap[i])
1877 init_dirty_segmap(sbi);
1878 return init_victim_secmap(sbi);
1882 * Update min, max modified time for cost-benefit GC algorithm
1884 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
1886 struct sit_info *sit_i = SIT_I(sbi);
1889 mutex_lock(&sit_i->sentry_lock);
1891 sit_i->min_mtime = LLONG_MAX;
1893 for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
1895 unsigned long long mtime = 0;
1897 for (i = 0; i < sbi->segs_per_sec; i++)
1898 mtime += get_seg_entry(sbi, segno + i)->mtime;
1900 mtime = div_u64(mtime, sbi->segs_per_sec);
1902 if (sit_i->min_mtime > mtime)
1903 sit_i->min_mtime = mtime;
1905 sit_i->max_mtime = get_mtime(sbi);
1906 mutex_unlock(&sit_i->sentry_lock);
1909 int build_segment_manager(struct f2fs_sb_info *sbi)
1911 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1912 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1913 struct f2fs_sm_info *sm_info;
1916 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
1921 sbi->sm_info = sm_info;
1922 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1923 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1924 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
1925 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1926 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1927 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
1928 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
1929 sm_info->rec_prefree_segments = sm_info->main_segments *
1930 DEF_RECLAIM_PREFREE_SEGMENTS / 100;
1931 sm_info->ipu_policy = F2FS_IPU_FSYNC;
1932 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
1933 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
1935 INIT_LIST_HEAD(&sm_info->discard_list);
1936 sm_info->nr_discards = 0;
1937 sm_info->max_discards = 0;
1939 INIT_LIST_HEAD(&sm_info->sit_entry_set);
1941 if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
1942 err = create_flush_cmd_control(sbi);
1947 err = build_sit_info(sbi);
1950 err = build_free_segmap(sbi);
1953 err = build_curseg(sbi);
1957 /* reinit free segmap based on SIT */
1958 build_sit_entries(sbi);
1960 init_free_segmap(sbi);
1961 err = build_dirty_segmap(sbi);
1965 init_min_max_mtime(sbi);
1969 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
1970 enum dirty_type dirty_type)
1972 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1974 mutex_lock(&dirty_i->seglist_lock);
1975 kfree(dirty_i->dirty_segmap[dirty_type]);
1976 dirty_i->nr_dirty[dirty_type] = 0;
1977 mutex_unlock(&dirty_i->seglist_lock);
1980 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
1982 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1983 kfree(dirty_i->victim_secmap);
1986 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
1988 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1994 /* discard pre-free/dirty segments list */
1995 for (i = 0; i < NR_DIRTY_TYPE; i++)
1996 discard_dirty_segmap(sbi, i);
1998 destroy_victim_secmap(sbi);
1999 SM_I(sbi)->dirty_info = NULL;
2003 static void destroy_curseg(struct f2fs_sb_info *sbi)
2005 struct curseg_info *array = SM_I(sbi)->curseg_array;
2010 SM_I(sbi)->curseg_array = NULL;
2011 for (i = 0; i < NR_CURSEG_TYPE; i++)
2012 kfree(array[i].sum_blk);
2016 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
2018 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
2021 SM_I(sbi)->free_info = NULL;
2022 kfree(free_i->free_segmap);
2023 kfree(free_i->free_secmap);
2027 static void destroy_sit_info(struct f2fs_sb_info *sbi)
2029 struct sit_info *sit_i = SIT_I(sbi);
2035 if (sit_i->sentries) {
2036 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
2037 kfree(sit_i->sentries[start].cur_valid_map);
2038 kfree(sit_i->sentries[start].ckpt_valid_map);
2041 vfree(sit_i->sentries);
2042 vfree(sit_i->sec_entries);
2043 kfree(sit_i->dirty_sentries_bitmap);
2045 SM_I(sbi)->sit_info = NULL;
2046 kfree(sit_i->sit_bitmap);
2050 void destroy_segment_manager(struct f2fs_sb_info *sbi)
2052 struct f2fs_sm_info *sm_info = SM_I(sbi);
2056 destroy_flush_cmd_control(sbi);
2057 destroy_dirty_segmap(sbi);
2058 destroy_curseg(sbi);
2059 destroy_free_segmap(sbi);
2060 destroy_sit_info(sbi);
2061 sbi->sm_info = NULL;
2065 int __init create_segment_manager_caches(void)
2067 discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
2068 sizeof(struct discard_entry));
2069 if (!discard_entry_slab)
2072 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
2073 sizeof(struct sit_entry_set));
2074 if (!sit_entry_set_slab)
2075 goto destroy_discard_entry;
2078 destroy_discard_entry:
2079 kmem_cache_destroy(discard_entry_slab);
2084 void destroy_segment_manager_caches(void)
2086 kmem_cache_destroy(sit_entry_set_slab);
2087 kmem_cache_destroy(discard_entry_slab);