4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/f2fs_fs.h>
13 #include <linux/bio.h>
14 #include <linux/blkdev.h>
15 #include <linux/prefetch.h>
16 #include <linux/vmalloc.h>
21 #include <trace/events/f2fs.h>
24 * This function balances dirty node and dentry pages.
25 * In addition, it controls garbage collection.
27 void f2fs_balance_fs(struct f2fs_sb_info *sbi)
30 * We should do GC or end up with checkpoint, if there are too many dirty
31 * dir/node pages without enough free segments.
33 if (has_not_enough_free_secs(sbi, 0)) {
34 mutex_lock(&sbi->gc_mutex);
39 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
40 enum dirty_type dirty_type)
42 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
44 /* need not be added */
45 if (IS_CURSEG(sbi, segno))
48 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
49 dirty_i->nr_dirty[dirty_type]++;
51 if (dirty_type == DIRTY) {
52 struct seg_entry *sentry = get_seg_entry(sbi, segno);
53 enum dirty_type t = DIRTY_HOT_DATA;
55 dirty_type = sentry->type;
57 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
58 dirty_i->nr_dirty[dirty_type]++;
60 /* Only one bitmap should be set */
61 for (; t <= DIRTY_COLD_NODE; t++) {
64 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
65 dirty_i->nr_dirty[t]--;
70 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
71 enum dirty_type dirty_type)
73 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
75 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
76 dirty_i->nr_dirty[dirty_type]--;
78 if (dirty_type == DIRTY) {
79 enum dirty_type t = DIRTY_HOT_DATA;
81 /* clear all the bitmaps */
82 for (; t <= DIRTY_COLD_NODE; t++)
83 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
84 dirty_i->nr_dirty[t]--;
86 if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
87 clear_bit(GET_SECNO(sbi, segno),
88 dirty_i->victim_secmap);
93 * Errors such as -ENOMEM should not occur, since adding a dirty entry
94 * to the seglist is not a critical operation.
95 * If a given segment is one of the current working segments, it won't be added.
97 void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
99 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
100 unsigned short valid_blocks;
102 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
105 mutex_lock(&dirty_i->seglist_lock);
107 valid_blocks = get_valid_blocks(sbi, segno, 0);
109 if (valid_blocks == 0) {
110 __locate_dirty_segment(sbi, segno, PRE);
111 __remove_dirty_segment(sbi, segno, DIRTY);
112 } else if (valid_blocks < sbi->blocks_per_seg) {
113 __locate_dirty_segment(sbi, segno, DIRTY);
115 /* Recovery routine with SSR needs this */
116 __remove_dirty_segment(sbi, segno, DIRTY);
119 mutex_unlock(&dirty_i->seglist_lock);
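/*
 * Illustrative sketch (not part of the driver): the nr_dirty[] counters in
 * __locate_dirty_segment/__remove_dirty_segment stay consistent with the
 * bitmaps because they are only adjusted when test_and_set_bit or
 * test_and_clear_bit report that the bit actually flipped.  The helpers
 * below mimic that pattern on a plain bitmap; the names and the word-size
 * handling are assumptions used only for this example.
 */
static int example_test_and_set(unsigned long *map, unsigned int nr)
{
	unsigned int bits = 8 * sizeof(unsigned long);
	unsigned long mask = 1UL << (nr % bits);
	unsigned long *word = &map[nr / bits];
	int was_set = (*word & mask) != 0;

	*word |= mask;
	return was_set;
}

static void example_mark_dirty(unsigned long *map, int *nr_dirty,
					unsigned int segno)
{
	/* bump the counter only on a 0 -> 1 transition of the bit */
	if (!example_test_and_set(map, segno))
		(*nr_dirty)++;
}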
124 * clear_prefree_segments should be called after the checkpoint is done.
126 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
128 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
129 unsigned int segno = -1;
130 unsigned int total_segs = TOTAL_SEGS(sbi);
132 mutex_lock(&dirty_i->seglist_lock);
134 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
136 if (segno >= total_segs)
138 __set_test_and_free(sbi, segno);
140 mutex_unlock(&dirty_i->seglist_lock);
143 void clear_prefree_segments(struct f2fs_sb_info *sbi)
145 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
146 unsigned int segno = -1;
147 unsigned int total_segs = TOTAL_SEGS(sbi);
149 mutex_lock(&dirty_i->seglist_lock);
151 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
153 if (segno >= total_segs)
156 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
157 dirty_i->nr_dirty[PRE]--;
160 if (test_opt(sbi, DISCARD))
161 blkdev_issue_discard(sbi->sb->s_bdev,
162 START_BLOCK(sbi, segno) <<
163 sbi->log_sectors_per_block,
164 1 << (sbi->log_sectors_per_block +
165 sbi->log_blocks_per_seg),
168 mutex_unlock(&dirty_i->seglist_lock);
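/*
 * Illustrative sketch (not part of the driver): the discard range above is
 * built by converting a block address and one segment's length into sectors
 * with shifts.  With 4KB blocks (log_sectors_per_block == 3) and 512 blocks
 * per segment (log_blocks_per_seg == 9), one segment maps to 4096 sectors.
 * Those numbers are assumed example values, not read from the superblock.
 */
static void example_discard_range(unsigned int start_blk,
				unsigned int log_sectors_per_block,
				unsigned int log_blocks_per_seg,
				unsigned long long *sector,
				unsigned long long *nr_sects)
{
	*sector = (unsigned long long)start_blk << log_sectors_per_block;
	*nr_sects = 1ULL << (log_sectors_per_block + log_blocks_per_seg);
}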
171 static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
173 struct sit_info *sit_i = SIT_I(sbi);
174 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
175 sit_i->dirty_sentries++;
178 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
179 unsigned int segno, int modified)
181 struct seg_entry *se = get_seg_entry(sbi, segno);
184 __mark_sit_entry_dirty(sbi, segno);
187 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
189 struct seg_entry *se;
190 unsigned int segno, offset;
191 long int new_vblocks;
193 segno = GET_SEGNO(sbi, blkaddr);
195 se = get_seg_entry(sbi, segno);
196 new_vblocks = se->valid_blocks + del;
197 offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
199 BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
200 (new_vblocks > sbi->blocks_per_seg)));
202 se->valid_blocks = new_vblocks;
203 se->mtime = get_mtime(sbi);
204 SIT_I(sbi)->max_mtime = se->mtime;
206 /* Update valid block bitmap */
208 if (f2fs_set_bit(offset, se->cur_valid_map))
211 if (!f2fs_clear_bit(offset, se->cur_valid_map))
214 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
215 se->ckpt_valid_blocks += del;
217 __mark_sit_entry_dirty(sbi, segno);
219 /* update total number of valid blocks to be written in ckpt area */
220 SIT_I(sbi)->written_valid_blocks += del;
222 if (sbi->segs_per_sec > 1)
223 get_sec_entry(sbi, segno)->valid_blocks += del;
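/*
 * Illustrative sketch (not part of the driver): update_sit_entry() splits a
 * block address into a segment number and an in-segment offset.  Because
 * blocks_per_seg is a power of two, the offset can be taken with a mask
 * instead of a modulo, as in the GET_SEGOFF_FROM_SEG0() line above.  The
 * seg0_blkaddr and blocks_per_seg parameters are assumptions for the example.
 */
static void example_blkaddr_to_seg(unsigned int blkaddr,
				unsigned int seg0_blkaddr,
				unsigned int blocks_per_seg,
				unsigned int *segno, unsigned int *offset)
{
	unsigned int segoff = blkaddr - seg0_blkaddr;

	*segno = segoff / blocks_per_seg;
	*offset = segoff & (blocks_per_seg - 1);  /* == segoff % blocks_per_seg */
}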
226 static void refresh_sit_entry(struct f2fs_sb_info *sbi,
227 block_t old_blkaddr, block_t new_blkaddr)
229 update_sit_entry(sbi, new_blkaddr, 1);
230 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
231 update_sit_entry(sbi, old_blkaddr, -1);
234 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
236 unsigned int segno = GET_SEGNO(sbi, addr);
237 struct sit_info *sit_i = SIT_I(sbi);
239 BUG_ON(addr == NULL_ADDR);
240 if (addr == NEW_ADDR)
243 /* add it into sit main buffer */
244 mutex_lock(&sit_i->sentry_lock);
246 update_sit_entry(sbi, addr, -1);
248 /* add it into dirty seglist */
249 locate_dirty_segment(sbi, segno);
251 mutex_unlock(&sit_i->sentry_lock);
255 * This function must be called with curseg_mutex held.
257 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
258 struct f2fs_summary *sum, unsigned short offset)
260 struct curseg_info *curseg = CURSEG_I(sbi, type);
261 void *addr = curseg->sum_blk;
262 addr += offset * sizeof(struct f2fs_summary);
263 memcpy(addr, sum, sizeof(struct f2fs_summary));
268 * Calculate the number of current summary pages for writing
270 int npages_for_summary_flush(struct f2fs_sb_info *sbi)
272 int total_size_bytes = 0;
273 int valid_sum_count = 0;
276 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
277 if (sbi->ckpt->alloc_type[i] == SSR)
278 valid_sum_count += sbi->blocks_per_seg;
280 valid_sum_count += curseg_blkoff(sbi, i);
283 total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
284 + sizeof(struct nat_journal) + 2
285 + sizeof(struct sit_journal) + 2;
286 sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
287 if (total_size_bytes < sum_space)
289 else if (total_size_bytes < 2 * sum_space)
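/*
 * Illustrative sketch (not part of the driver): the compacted summary needs
 * one, two or three meta pages depending on how many summary entries the
 * three data logs hold plus the NAT/SIT journal bytes.  The constants below
 * (4096-byte page, 7-byte summary entry, 5-byte footer, journal size) are
 * assumed example values, not taken from the on-disk format headers.
 */
static int example_npages_for_summary(int valid_sum_count)
{
	const int page_size = 4096;
	const int summary_size = 7;
	const int sum_footer_size = 5;
	const int journal_bytes = 2 * 512;	/* assumed NAT+SIT journal size */
	int total = valid_sum_count * (summary_size + 1) + journal_bytes;
	int sum_space = page_size - sum_footer_size;

	if (total < sum_space)
		return 1;
	else if (total < 2 * sum_space)
		return 2;
	return 3;
}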
295 * Caller should put this summary page
297 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
299 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
302 static void write_sum_page(struct f2fs_sb_info *sbi,
303 struct f2fs_summary_block *sum_blk, block_t blk_addr)
305 struct page *page = grab_meta_page(sbi, blk_addr);
306 void *kaddr = page_address(page);
307 memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
308 set_page_dirty(page);
309 f2fs_put_page(page, 1);
312 static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
314 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
315 unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
317 unsigned int ofs = 0;
320 * If there are not enough reserved sections,
321 * we should not reuse prefree segments.
323 if (has_not_enough_free_secs(sbi, 0))
327 * A NODE page should not reuse a prefree segment,
328 * since that information is used for SPOR (sudden power-off recovery).
330 if (IS_NODESEG(type))
333 segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
334 ofs += sbi->segs_per_sec;
336 if (segno < TOTAL_SEGS(sbi)) {
339 /* skip intermediate segments in a section */
340 if (segno % sbi->segs_per_sec)
343 /* skip if the section is currently used */
344 if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
347 /* skip if whole section is not prefree */
348 for (i = 1; i < sbi->segs_per_sec; i++)
349 if (!test_bit(segno + i, prefree_segmap))
352 /* skip if whole section was not free at the last checkpoint */
353 for (i = 0; i < sbi->segs_per_sec; i++)
354 if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
362 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
364 struct curseg_info *curseg = CURSEG_I(sbi, type);
365 unsigned int segno = curseg->segno + 1;
366 struct free_segmap_info *free_i = FREE_I(sbi);
368 if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
369 return !test_bit(segno, free_i->free_segmap);
374 * Find a new segment from the free segments bitmap in the right order.
375 * This function must succeed; otherwise it is a BUG.
377 static void get_new_segment(struct f2fs_sb_info *sbi,
378 unsigned int *newseg, bool new_sec, int dir)
380 struct free_segmap_info *free_i = FREE_I(sbi);
381 unsigned int segno, secno, zoneno;
382 unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
383 unsigned int hint = *newseg / sbi->segs_per_sec;
384 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
385 unsigned int left_start = hint;
390 write_lock(&free_i->segmap_lock);
392 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
393 segno = find_next_zero_bit(free_i->free_segmap,
394 TOTAL_SEGS(sbi), *newseg + 1);
395 if (segno - *newseg < sbi->segs_per_sec -
396 (*newseg % sbi->segs_per_sec))
400 secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
401 if (secno >= TOTAL_SECS(sbi)) {
402 if (dir == ALLOC_RIGHT) {
403 secno = find_next_zero_bit(free_i->free_secmap,
405 BUG_ON(secno >= TOTAL_SECS(sbi));
408 left_start = hint - 1;
414 while (test_bit(left_start, free_i->free_secmap)) {
415 if (left_start > 0) {
419 left_start = find_next_zero_bit(free_i->free_secmap,
421 BUG_ON(left_start >= TOTAL_SECS(sbi));
427 segno = secno * sbi->segs_per_sec;
428 zoneno = secno / sbi->secs_per_zone;
430 /* give up on finding another zone */
433 if (sbi->secs_per_zone == 1)
435 if (zoneno == old_zoneno)
437 if (dir == ALLOC_LEFT) {
438 if (!go_left && zoneno + 1 >= total_zones)
440 if (go_left && zoneno == 0)
443 for (i = 0; i < NR_CURSEG_TYPE; i++)
444 if (CURSEG_I(sbi, i)->zone == zoneno)
447 if (i < NR_CURSEG_TYPE) {
448 /* zone is in use, try another */
450 hint = zoneno * sbi->secs_per_zone - 1;
451 else if (zoneno + 1 >= total_zones)
454 hint = (zoneno + 1) * sbi->secs_per_zone;
456 goto find_other_zone;
459 /* set it as dirty segment in free segmap */
460 BUG_ON(test_bit(segno, free_i->free_segmap));
461 __set_inuse(sbi, segno);
463 write_unlock(&free_i->segmap_lock);
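/*
 * Illustrative sketch (not part of the driver): get_new_segment() scans the
 * free-section bitmap from a hint and, if nothing is found, either restarts
 * from zero (ALLOC_RIGHT) or walks left (ALLOC_LEFT).  The helper below
 * shows the core "next zero bit from the hint, else wrap" idea on a plain
 * byte array; it is a simplified stand-in for find_next_zero_bit().
 */
static unsigned int example_find_free_sec(const unsigned char *used,
				unsigned int total_secs, unsigned int hint)
{
	unsigned int i;

	for (i = hint; i < total_secs; i++)
		if (!used[i])
			return i;
	for (i = 0; i < hint; i++)	/* wrap around and retry */
		if (!used[i])
			return i;
	return total_secs;		/* no free section left */
}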
466 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
468 struct curseg_info *curseg = CURSEG_I(sbi, type);
469 struct summary_footer *sum_footer;
471 curseg->segno = curseg->next_segno;
472 curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
473 curseg->next_blkoff = 0;
474 curseg->next_segno = NULL_SEGNO;
476 sum_footer = &(curseg->sum_blk->footer);
477 memset(sum_footer, 0, sizeof(struct summary_footer));
478 if (IS_DATASEG(type))
479 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
480 if (IS_NODESEG(type))
481 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
482 __set_sit_entry_type(sbi, type, curseg->segno, modified);
486 * Allocate a current working segment.
487 * This function always allocates a free segment in LFS manner.
489 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
491 struct curseg_info *curseg = CURSEG_I(sbi, type);
492 unsigned int segno = curseg->segno;
493 int dir = ALLOC_LEFT;
495 write_sum_page(sbi, curseg->sum_blk,
496 GET_SUM_BLOCK(sbi, segno));
497 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
500 if (test_opt(sbi, NOHEAP))
503 get_new_segment(sbi, &segno, new_sec, dir);
504 curseg->next_segno = segno;
505 reset_curseg(sbi, type, 1);
506 curseg->alloc_type = LFS;
509 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
510 struct curseg_info *seg, block_t start)
512 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
514 for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
515 if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
516 && !f2fs_test_bit(ofs, se->cur_valid_map))
519 seg->next_blkoff = ofs;
523 * If a segment is written in LFS manner, the next block offset is simply
524 * obtained by increasing the current block offset. However, if a segment is
525 * written in SSR manner, the next block offset is obtained by calling __next_free_blkoff.
527 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
528 struct curseg_info *seg)
530 if (seg->alloc_type == SSR)
531 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
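/*
 * Illustrative sketch (not part of the driver): in LFS mode the next write
 * position is simply the following block, while in SSR mode it is the next
 * block that is free in both the checkpointed and the current validity
 * bitmaps.  The byte-array bitmaps below are assumptions used only to show
 * the scan performed by __next_free_blkoff().
 */
static unsigned int example_next_blkoff(int is_ssr, unsigned int cur,
				const unsigned char *ckpt_valid,
				const unsigned char *cur_valid,
				unsigned int blocks_per_seg)
{
	unsigned int ofs;

	if (!is_ssr)
		return cur + 1;		/* LFS: append to the log */

	for (ofs = cur + 1; ofs < blocks_per_seg; ofs++)
		if (!ckpt_valid[ofs] && !cur_valid[ofs])
			break;		/* SSR: reuse an invalid slot */
	return ofs;			/* == blocks_per_seg if segment is full */
}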
537 * This function always allocates a used segment (from the dirty seglist) in
538 * SSR manner, so it needs to recover the existing valid-block information of the segment.
540 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
542 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
543 struct curseg_info *curseg = CURSEG_I(sbi, type);
544 unsigned int new_segno = curseg->next_segno;
545 struct f2fs_summary_block *sum_node;
546 struct page *sum_page;
548 write_sum_page(sbi, curseg->sum_blk,
549 GET_SUM_BLOCK(sbi, curseg->segno));
550 __set_test_and_inuse(sbi, new_segno);
552 mutex_lock(&dirty_i->seglist_lock);
553 __remove_dirty_segment(sbi, new_segno, PRE);
554 __remove_dirty_segment(sbi, new_segno, DIRTY);
555 mutex_unlock(&dirty_i->seglist_lock);
557 reset_curseg(sbi, type, 1);
558 curseg->alloc_type = SSR;
559 __next_free_blkoff(sbi, curseg, 0);
562 sum_page = get_sum_page(sbi, new_segno);
563 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
564 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
565 f2fs_put_page(sum_page, 1);
569 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
571 struct curseg_info *curseg = CURSEG_I(sbi, type);
572 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
574 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
575 return v_ops->get_victim(sbi,
576 &(curseg)->next_segno, BG_GC, type, SSR);
578 /* For data segments, let's do SSR more intensively */
579 for (; type >= CURSEG_HOT_DATA; type--)
580 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
587 * Flush out the current segment and replace it with a new segment.
588 * This function must succeed; otherwise it is a BUG.
590 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
591 int type, bool force)
593 struct curseg_info *curseg = CURSEG_I(sbi, type);
596 new_curseg(sbi, type, true);
600 curseg->next_segno = check_prefree_segments(sbi, type);
602 if (curseg->next_segno != NULL_SEGNO)
603 change_curseg(sbi, type, false);
604 else if (type == CURSEG_WARM_NODE)
605 new_curseg(sbi, type, false);
606 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
607 new_curseg(sbi, type, false);
608 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
609 change_curseg(sbi, type, true);
611 new_curseg(sbi, type, false);
613 sbi->segment_count[curseg->alloc_type]++;
616 void allocate_new_segments(struct f2fs_sb_info *sbi)
618 struct curseg_info *curseg;
619 unsigned int old_curseg;
622 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
623 curseg = CURSEG_I(sbi, i);
624 old_curseg = curseg->segno;
625 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
626 locate_dirty_segment(sbi, old_curseg);
630 static const struct segment_allocation default_salloc_ops = {
631 .allocate_segment = allocate_segment_by_default,
634 static void f2fs_end_io_write(struct bio *bio, int err)
636 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
637 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
638 struct bio_private *p = bio->bi_private;
641 struct page *page = bvec->bv_page;
643 if (--bvec >= bio->bi_io_vec)
644 prefetchw(&bvec->bv_page->flags);
648 set_bit(AS_EIO, &page->mapping->flags);
649 set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
650 p->sbi->sb->s_flags |= MS_RDONLY;
652 end_page_writeback(page);
653 dec_page_count(p->sbi, F2FS_WRITEBACK);
654 } while (bvec >= bio->bi_io_vec);
662 struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
665 struct bio_private *priv;
667 priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
673 /* No failure on bio allocation */
674 bio = bio_alloc(GFP_NOIO, npages);
676 bio->bi_private = priv;
680 static void do_submit_bio(struct f2fs_sb_info *sbi,
681 enum page_type type, bool sync)
683 int rw = sync ? WRITE_SYNC : WRITE;
684 enum page_type btype = type > META ? META : type;
686 if (type >= META_FLUSH)
687 rw = WRITE_FLUSH_FUA;
692 if (sbi->bio[btype]) {
693 struct bio_private *p = sbi->bio[btype]->bi_private;
695 sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
697 trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);
699 if (type == META_FLUSH) {
700 DECLARE_COMPLETION_ONSTACK(wait);
703 submit_bio(rw, sbi->bio[btype]);
704 wait_for_completion(&wait);
707 submit_bio(rw, sbi->bio[btype]);
709 sbi->bio[btype] = NULL;
713 void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
715 down_write(&sbi->bio_sem);
716 do_submit_bio(sbi, type, sync);
717 up_write(&sbi->bio_sem);
720 static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
721 block_t blk_addr, enum page_type type)
723 struct block_device *bdev = sbi->sb->s_bdev;
725 verify_block_addr(sbi, blk_addr);
727 down_write(&sbi->bio_sem);
729 inc_page_count(sbi, F2FS_WRITEBACK);
731 if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
732 do_submit_bio(sbi, type, false);
734 if (sbi->bio[type] == NULL) {
735 sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
736 sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
738 * The end_io callback will be assigned at the submission phase.
739 * Until then, let bio_add_page() merge consecutive IOs as much as possible.
744 if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
746 do_submit_bio(sbi, type, false);
750 sbi->last_block_in_bio[type] = blk_addr;
752 up_write(&sbi->bio_sem);
753 trace_f2fs_submit_write_page(page, blk_addr, type);
756 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
758 struct curseg_info *curseg = CURSEG_I(sbi, type);
759 if (curseg->next_blkoff < sbi->blocks_per_seg)
764 static int __get_segment_type_2(struct page *page, enum page_type p_type)
767 return CURSEG_HOT_DATA;
769 return CURSEG_HOT_NODE;
772 static int __get_segment_type_4(struct page *page, enum page_type p_type)
774 if (p_type == DATA) {
775 struct inode *inode = page->mapping->host;
777 if (S_ISDIR(inode->i_mode))
778 return CURSEG_HOT_DATA;
780 return CURSEG_COLD_DATA;
782 if (IS_DNODE(page) && !is_cold_node(page))
783 return CURSEG_HOT_NODE;
785 return CURSEG_COLD_NODE;
789 static int __get_segment_type_6(struct page *page, enum page_type p_type)
791 if (p_type == DATA) {
792 struct inode *inode = page->mapping->host;
794 if (S_ISDIR(inode->i_mode))
795 return CURSEG_HOT_DATA;
796 else if (is_cold_data(page) || is_cold_file(inode))
797 return CURSEG_COLD_DATA;
799 return CURSEG_WARM_DATA;
802 return is_cold_node(page) ? CURSEG_WARM_NODE :
805 return CURSEG_COLD_NODE;
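/*
 * Illustrative sketch (not part of the driver): with six active logs, pages
 * are classified into hot/warm/cold data and node logs.  The helper below
 * restates the decisions of __get_segment_type_6() using plain flags; the
 * enum values are local to this example.
 */
enum example_temp {
	EX_HOT_DATA, EX_WARM_DATA, EX_COLD_DATA,
	EX_HOT_NODE, EX_WARM_NODE, EX_COLD_NODE,
};

static enum example_temp example_classify(int is_data, int is_dir,
					int is_cold, int is_dnode)
{
	if (is_data) {
		if (is_dir)
			return EX_HOT_DATA;	/* dentry blocks */
		if (is_cold)
			return EX_COLD_DATA;	/* cold files or cold data */
		return EX_WARM_DATA;
	}
	if (is_dnode)
		return is_cold ? EX_WARM_NODE : EX_HOT_NODE;
	return EX_COLD_NODE;			/* indirect node blocks */
}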
809 static int __get_segment_type(struct page *page, enum page_type p_type)
811 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
812 switch (sbi->active_logs) {
814 return __get_segment_type_2(page, p_type);
816 return __get_segment_type_4(page, p_type);
818 /* NR_CURSEG_TYPE(6) logs by default */
819 BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
820 return __get_segment_type_6(page, p_type);
823 static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
824 block_t old_blkaddr, block_t *new_blkaddr,
825 struct f2fs_summary *sum, enum page_type p_type)
827 struct sit_info *sit_i = SIT_I(sbi);
828 struct curseg_info *curseg;
829 unsigned int old_cursegno;
832 type = __get_segment_type(page, p_type);
833 curseg = CURSEG_I(sbi, type);
835 mutex_lock(&curseg->curseg_mutex);
837 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
838 old_cursegno = curseg->segno;
841 * __add_sum_entry must be called with curseg_mutex held,
842 * because this function updates a summary entry in the
843 * current summary block.
845 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
847 mutex_lock(&sit_i->sentry_lock);
848 __refresh_next_blkoff(sbi, curseg);
849 sbi->block_count[curseg->alloc_type]++;
852 * SIT information should be updated before segment allocation,
853 * since SSR needs the latest valid block information.
855 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
857 if (!__has_curseg_space(sbi, type))
858 sit_i->s_ops->allocate_segment(sbi, type, false);
860 locate_dirty_segment(sbi, old_cursegno);
861 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
862 mutex_unlock(&sit_i->sentry_lock);
865 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
867 /* write out the dirty page to the bdev */
868 submit_write_page(sbi, page, *new_blkaddr, p_type);
870 mutex_unlock(&curseg->curseg_mutex);
873 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
875 set_page_writeback(page);
876 submit_write_page(sbi, page, page->index, META);
879 void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
880 unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
882 struct f2fs_summary sum;
883 set_summary(&sum, nid, 0, 0);
884 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
887 void write_data_page(struct inode *inode, struct page *page,
888 struct dnode_of_data *dn, block_t old_blkaddr,
889 block_t *new_blkaddr)
891 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
892 struct f2fs_summary sum;
895 BUG_ON(old_blkaddr == NULL_ADDR);
896 get_node_info(sbi, dn->nid, &ni);
897 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
899 do_write_page(sbi, page, old_blkaddr,
900 new_blkaddr, &sum, DATA);
903 void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
904 block_t old_blk_addr)
906 submit_write_page(sbi, page, old_blk_addr, DATA);
909 void recover_data_page(struct f2fs_sb_info *sbi,
910 struct page *page, struct f2fs_summary *sum,
911 block_t old_blkaddr, block_t new_blkaddr)
913 struct sit_info *sit_i = SIT_I(sbi);
914 struct curseg_info *curseg;
915 unsigned int segno, old_cursegno;
916 struct seg_entry *se;
919 segno = GET_SEGNO(sbi, new_blkaddr);
920 se = get_seg_entry(sbi, segno);
923 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
924 if (old_blkaddr == NULL_ADDR)
925 type = CURSEG_COLD_DATA;
927 type = CURSEG_WARM_DATA;
929 curseg = CURSEG_I(sbi, type);
931 mutex_lock(&curseg->curseg_mutex);
932 mutex_lock(&sit_i->sentry_lock);
934 old_cursegno = curseg->segno;
936 /* change the current segment */
937 if (segno != curseg->segno) {
938 curseg->next_segno = segno;
939 change_curseg(sbi, type, true);
942 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
943 (sbi->blocks_per_seg - 1);
944 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
946 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
948 locate_dirty_segment(sbi, old_cursegno);
949 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
951 mutex_unlock(&sit_i->sentry_lock);
952 mutex_unlock(&curseg->curseg_mutex);
955 void rewrite_node_page(struct f2fs_sb_info *sbi,
956 struct page *page, struct f2fs_summary *sum,
957 block_t old_blkaddr, block_t new_blkaddr)
959 struct sit_info *sit_i = SIT_I(sbi);
960 int type = CURSEG_WARM_NODE;
961 struct curseg_info *curseg;
962 unsigned int segno, old_cursegno;
963 block_t next_blkaddr = next_blkaddr_of_node(page);
964 unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
966 curseg = CURSEG_I(sbi, type);
968 mutex_lock(&curseg->curseg_mutex);
969 mutex_lock(&sit_i->sentry_lock);
971 segno = GET_SEGNO(sbi, new_blkaddr);
972 old_cursegno = curseg->segno;
974 /* change the current segment */
975 if (segno != curseg->segno) {
976 curseg->next_segno = segno;
977 change_curseg(sbi, type, true);
979 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
980 (sbi->blocks_per_seg - 1);
981 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
983 /* change the current log to the next block addr in advance */
984 if (next_segno != segno) {
985 curseg->next_segno = next_segno;
986 change_curseg(sbi, type, true);
988 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
989 (sbi->blocks_per_seg - 1);
991 /* rewrite node page */
992 set_page_writeback(page);
993 submit_write_page(sbi, page, new_blkaddr, NODE);
994 f2fs_submit_bio(sbi, NODE, true);
995 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
997 locate_dirty_segment(sbi, old_cursegno);
998 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
1000 mutex_unlock(&sit_i->sentry_lock);
1001 mutex_unlock(&curseg->curseg_mutex);
1004 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1006 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1007 struct curseg_info *seg_i;
1008 unsigned char *kaddr;
1013 start = start_sum_block(sbi);
1015 page = get_meta_page(sbi, start++);
1016 kaddr = (unsigned char *)page_address(page);
1018 /* Step 1: restore nat cache */
1019 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1020 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1022 /* Step 2: restore sit cache */
1023 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1024 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1026 offset = 2 * SUM_JOURNAL_SIZE;
1028 /* Step 3: restore summary entries */
1029 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1030 unsigned short blk_off;
1033 seg_i = CURSEG_I(sbi, i);
1034 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1035 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1036 seg_i->next_segno = segno;
1037 reset_curseg(sbi, i, 0);
1038 seg_i->alloc_type = ckpt->alloc_type[i];
1039 seg_i->next_blkoff = blk_off;
1041 if (seg_i->alloc_type == SSR)
1042 blk_off = sbi->blocks_per_seg;
1044 for (j = 0; j < blk_off; j++) {
1045 struct f2fs_summary *s;
1046 s = (struct f2fs_summary *)(kaddr + offset);
1047 seg_i->sum_blk->entries[j] = *s;
1048 offset += SUMMARY_SIZE;
1049 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1053 f2fs_put_page(page, 1);
1056 page = get_meta_page(sbi, start++);
1057 kaddr = (unsigned char *)page_address(page);
1061 f2fs_put_page(page, 1);
1065 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1067 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1068 struct f2fs_summary_block *sum;
1069 struct curseg_info *curseg;
1071 unsigned short blk_off;
1072 unsigned int segno = 0;
1073 block_t blk_addr = 0;
1075 /* get segment number and block addr */
1076 if (IS_DATASEG(type)) {
1077 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1078 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1080 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1081 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1083 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1085 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1087 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1089 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1090 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1091 type - CURSEG_HOT_NODE);
1093 blk_addr = GET_SUM_BLOCK(sbi, segno);
1096 new = get_meta_page(sbi, blk_addr);
1097 sum = (struct f2fs_summary_block *)page_address(new);
1099 if (IS_NODESEG(type)) {
1100 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
1101 struct f2fs_summary *ns = &sum->entries[0];
1103 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1105 ns->ofs_in_node = 0;
1108 if (restore_node_summary(sbi, segno, sum)) {
1109 f2fs_put_page(new, 1);
1115 /* set the uncompleted segment as curseg */
1116 curseg = CURSEG_I(sbi, type);
1117 mutex_lock(&curseg->curseg_mutex);
1118 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1119 curseg->next_segno = segno;
1120 reset_curseg(sbi, type, 0);
1121 curseg->alloc_type = ckpt->alloc_type[type];
1122 curseg->next_blkoff = blk_off;
1123 mutex_unlock(&curseg->curseg_mutex);
1124 f2fs_put_page(new, 1);
1128 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1130 int type = CURSEG_HOT_DATA;
1132 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1133 /* restore for compacted data summary */
1134 if (read_compacted_summaries(sbi))
1136 type = CURSEG_HOT_NODE;
1139 for (; type <= CURSEG_COLD_NODE; type++)
1140 if (read_normal_summaries(sbi, type))
1145 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1148 unsigned char *kaddr;
1149 struct f2fs_summary *summary;
1150 struct curseg_info *seg_i;
1151 int written_size = 0;
1154 page = grab_meta_page(sbi, blkaddr++);
1155 kaddr = (unsigned char *)page_address(page);
1157 /* Step 1: write nat cache */
1158 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1159 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1160 written_size += SUM_JOURNAL_SIZE;
1162 /* Step 2: write sit cache */
1163 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1164 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1166 written_size += SUM_JOURNAL_SIZE;
1168 set_page_dirty(page);
1170 /* Step 3: write summary entries */
1171 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1172 unsigned short blkoff;
1173 seg_i = CURSEG_I(sbi, i);
1174 if (sbi->ckpt->alloc_type[i] == SSR)
1175 blkoff = sbi->blocks_per_seg;
1177 blkoff = curseg_blkoff(sbi, i);
1179 for (j = 0; j < blkoff; j++) {
1181 page = grab_meta_page(sbi, blkaddr++);
1182 kaddr = (unsigned char *)page_address(page);
1185 summary = (struct f2fs_summary *)(kaddr + written_size);
1186 *summary = seg_i->sum_blk->entries[j];
1187 written_size += SUMMARY_SIZE;
1188 set_page_dirty(page);
1190 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1194 f2fs_put_page(page, 1);
1199 f2fs_put_page(page, 1);
1202 static void write_normal_summaries(struct f2fs_sb_info *sbi,
1203 block_t blkaddr, int type)
1206 if (IS_DATASEG(type))
1207 end = type + NR_CURSEG_DATA_TYPE;
1209 end = type + NR_CURSEG_NODE_TYPE;
1211 for (i = type; i < end; i++) {
1212 struct curseg_info *sum = CURSEG_I(sbi, i);
1213 mutex_lock(&sum->curseg_mutex);
1214 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1215 mutex_unlock(&sum->curseg_mutex);
1219 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1221 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
1222 write_compacted_summaries(sbi, start_blk);
1224 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1227 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1229 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
1230 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
1234 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1235 unsigned int val, int alloc)
1239 if (type == NAT_JOURNAL) {
1240 for (i = 0; i < nats_in_cursum(sum); i++) {
1241 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1244 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1245 return update_nats_in_cursum(sum, 1);
1246 } else if (type == SIT_JOURNAL) {
1247 for (i = 0; i < sits_in_cursum(sum); i++)
1248 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1250 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1251 return update_sits_in_cursum(sum, 1);
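/*
 * Illustrative sketch (not part of the driver): the in-summary journal is a
 * small array searched linearly; on a miss the caller may allocate a new
 * slot as long as the journal is not full, otherwise the lookup fails.  The
 * flat key array and capacity below are example assumptions standing in for
 * the NAT/SIT journal layouts.
 */
static int example_journal_lookup(unsigned int *keys, int *nr_used,
				int capacity, unsigned int val, int alloc)
{
	int i;

	for (i = 0; i < *nr_used; i++)
		if (keys[i] == val)
			return i;		/* hit: reuse the entry */

	if (alloc && *nr_used < capacity) {
		keys[*nr_used] = val;		/* miss: take a new slot */
		return (*nr_used)++;
	}
	return -1;				/* journal full or alloc not requested */
}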
1256 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1259 struct sit_info *sit_i = SIT_I(sbi);
1260 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1261 block_t blk_addr = sit_i->sit_base_addr + offset;
1263 check_seg_range(sbi, segno);
1265 /* calculate sit block address */
1266 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1267 blk_addr += sit_i->sit_blocks;
1269 return get_meta_page(sbi, blk_addr);
1272 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1275 struct sit_info *sit_i = SIT_I(sbi);
1276 struct page *src_page, *dst_page;
1277 pgoff_t src_off, dst_off;
1278 void *src_addr, *dst_addr;
1280 src_off = current_sit_addr(sbi, start);
1281 dst_off = next_sit_addr(sbi, src_off);
1283 /* get current sit block page without lock */
1284 src_page = get_meta_page(sbi, src_off);
1285 dst_page = grab_meta_page(sbi, dst_off);
1286 BUG_ON(PageDirty(src_page));
1288 src_addr = page_address(src_page);
1289 dst_addr = page_address(dst_page);
1290 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1292 set_page_dirty(dst_page);
1293 f2fs_put_page(src_page, 1);
1295 set_to_next_sit(sit_i, start);
1300 static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
1302 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1303 struct f2fs_summary_block *sum = curseg->sum_blk;
1307 * If the journal area in the current summary is full of sit entries,
1308 * all the sit entries will be flushed. Otherwise the sit entries
1309 * cannot be replaced with newly hot sit entries.
1311 if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
1312 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1314 segno = le32_to_cpu(segno_in_journal(sum, i));
1315 __mark_sit_entry_dirty(sbi, segno);
1317 update_sits_in_cursum(sum, -sits_in_cursum(sum));
1324 * CP calls this function, which flushes SIT entries including sit_journal,
1325 * and moves prefree segs to free segs.
1327 void flush_sit_entries(struct f2fs_sb_info *sbi)
1329 struct sit_info *sit_i = SIT_I(sbi);
1330 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1331 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1332 struct f2fs_summary_block *sum = curseg->sum_blk;
1333 unsigned long nsegs = TOTAL_SEGS(sbi);
1334 struct page *page = NULL;
1335 struct f2fs_sit_block *raw_sit = NULL;
1336 unsigned int start = 0, end = 0;
1337 unsigned int segno = -1;
1340 mutex_lock(&curseg->curseg_mutex);
1341 mutex_lock(&sit_i->sentry_lock);
1344 * "flushed" indicates whether sit entries in journal are flushed
1345 * to the SIT area or not.
1347 flushed = flush_sits_in_journal(sbi);
1349 while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
1350 struct seg_entry *se = get_seg_entry(sbi, segno);
1351 int sit_offset, offset;
1353 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1358 offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
1360 segno_in_journal(sum, offset) = cpu_to_le32(segno);
1361 seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
1365 if (!page || (start > segno) || (segno > end)) {
1367 f2fs_put_page(page, 1);
1371 start = START_SEGNO(sit_i, segno);
1372 end = start + SIT_ENTRY_PER_BLOCK - 1;
1374 /* read sit block that will be updated */
1375 page = get_next_sit_page(sbi, start);
1376 raw_sit = page_address(page);
1379 /* update entry in SIT block */
1380 seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
1382 __clear_bit(segno, bitmap);
1383 sit_i->dirty_sentries--;
1385 mutex_unlock(&sit_i->sentry_lock);
1386 mutex_unlock(&curseg->curseg_mutex);
1388 /* write out the last modified SIT block */
1389 f2fs_put_page(page, 1);
1391 set_prefree_as_free_segments(sbi);
1394 static int build_sit_info(struct f2fs_sb_info *sbi)
1396 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1397 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1398 struct sit_info *sit_i;
1399 unsigned int sit_segs, start;
1400 char *src_bitmap, *dst_bitmap;
1401 unsigned int bitmap_size;
1403 /* allocate memory for SIT information */
1404 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1408 SM_I(sbi)->sit_info = sit_i;
1410 sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
1411 if (!sit_i->sentries)
1414 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1415 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1416 if (!sit_i->dirty_sentries_bitmap)
1419 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1420 sit_i->sentries[start].cur_valid_map
1421 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1422 sit_i->sentries[start].ckpt_valid_map
1423 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1424 if (!sit_i->sentries[start].cur_valid_map
1425 || !sit_i->sentries[start].ckpt_valid_map)
1429 if (sbi->segs_per_sec > 1) {
1430 sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
1431 sizeof(struct sec_entry));
1432 if (!sit_i->sec_entries)
1436 /* get information related with SIT */
1437 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1439 /* setup SIT bitmap from checkpoint pack */
1440 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1441 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1443 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1447 /* init SIT information */
1448 sit_i->s_ops = &default_salloc_ops;
1450 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1451 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1452 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1453 sit_i->sit_bitmap = dst_bitmap;
1454 sit_i->bitmap_size = bitmap_size;
1455 sit_i->dirty_sentries = 0;
1456 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1457 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1458 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1459 mutex_init(&sit_i->sentry_lock);
1463 static int build_free_segmap(struct f2fs_sb_info *sbi)
1465 struct f2fs_sm_info *sm_info = SM_I(sbi);
1466 struct free_segmap_info *free_i;
1467 unsigned int bitmap_size, sec_bitmap_size;
1469 /* allocate memory for free segmap information */
1470 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1474 SM_I(sbi)->free_info = free_i;
1476 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1477 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1478 if (!free_i->free_segmap)
1481 sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1482 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1483 if (!free_i->free_secmap)
1486 /* set all segments as dirty temporarily */
1487 memset(free_i->free_segmap, 0xff, bitmap_size);
1488 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1490 /* init free segmap information */
1491 free_i->start_segno =
1492 (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
1493 free_i->free_segments = 0;
1494 free_i->free_sections = 0;
1495 rwlock_init(&free_i->segmap_lock);
1499 static int build_curseg(struct f2fs_sb_info *sbi)
1501 struct curseg_info *array;
1504 array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
1508 SM_I(sbi)->curseg_array = array;
1510 for (i = 0; i < NR_CURSEG_TYPE; i++) {
1511 mutex_init(&array[i].curseg_mutex);
1512 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1513 if (!array[i].sum_blk)
1515 array[i].segno = NULL_SEGNO;
1516 array[i].next_blkoff = 0;
1518 return restore_curseg_summaries(sbi);
1521 static void build_sit_entries(struct f2fs_sb_info *sbi)
1523 struct sit_info *sit_i = SIT_I(sbi);
1524 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1525 struct f2fs_summary_block *sum = curseg->sum_blk;
1528 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1529 struct seg_entry *se = &sit_i->sentries[start];
1530 struct f2fs_sit_block *sit_blk;
1531 struct f2fs_sit_entry sit;
1535 mutex_lock(&curseg->curseg_mutex);
1536 for (i = 0; i < sits_in_cursum(sum); i++) {
1537 if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
1538 sit = sit_in_journal(sum, i);
1539 mutex_unlock(&curseg->curseg_mutex);
1543 mutex_unlock(&curseg->curseg_mutex);
1544 page = get_current_sit_page(sbi, start);
1545 sit_blk = (struct f2fs_sit_block *)page_address(page);
1546 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1547 f2fs_put_page(page, 1);
1549 check_block_count(sbi, start, &sit);
1550 seg_info_from_raw_sit(se, &sit);
1551 if (sbi->segs_per_sec > 1) {
1552 struct sec_entry *e = get_sec_entry(sbi, start);
1553 e->valid_blocks += se->valid_blocks;
1558 static void init_free_segmap(struct f2fs_sb_info *sbi)
1563 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1564 struct seg_entry *sentry = get_seg_entry(sbi, start);
1565 if (!sentry->valid_blocks)
1566 __set_free(sbi, start);
1569 /* set the current segments in use */
1570 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
1571 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
1572 __set_test_and_inuse(sbi, curseg_t->segno);
1576 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
1578 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1579 struct free_segmap_info *free_i = FREE_I(sbi);
1580 unsigned int segno = 0, offset = 0;
1581 unsigned short valid_blocks;
1583 while (segno < TOTAL_SEGS(sbi)) {
1584 /* find dirty segment based on free segmap */
1585 segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset);
1586 if (segno >= TOTAL_SEGS(sbi))
1589 valid_blocks = get_valid_blocks(sbi, segno, 0);
1590 if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
1592 mutex_lock(&dirty_i->seglist_lock);
1593 __locate_dirty_segment(sbi, segno, DIRTY);
1594 mutex_unlock(&dirty_i->seglist_lock);
1598 static int init_victim_secmap(struct f2fs_sb_info *sbi)
1600 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1601 unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1603 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
1604 if (!dirty_i->victim_secmap)
1609 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
1611 struct dirty_seglist_info *dirty_i;
1612 unsigned int bitmap_size, i;
1614 /* allocate memory for dirty segments list information */
1615 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
1619 SM_I(sbi)->dirty_info = dirty_i;
1620 mutex_init(&dirty_i->seglist_lock);
1622 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1624 for (i = 0; i < NR_DIRTY_TYPE; i++) {
1625 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
1626 if (!dirty_i->dirty_segmap[i])
1630 init_dirty_segmap(sbi);
1631 return init_victim_secmap(sbi);
1635 * Update min, max modified time for cost-benefit GC algorithm
1637 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
1639 struct sit_info *sit_i = SIT_I(sbi);
1642 mutex_lock(&sit_i->sentry_lock);
1644 sit_i->min_mtime = LLONG_MAX;
1646 for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
1648 unsigned long long mtime = 0;
1650 for (i = 0; i < sbi->segs_per_sec; i++)
1651 mtime += get_seg_entry(sbi, segno + i)->mtime;
1653 mtime = div_u64(mtime, sbi->segs_per_sec);
1655 if (sit_i->min_mtime > mtime)
1656 sit_i->min_mtime = mtime;
1658 sit_i->max_mtime = get_mtime(sbi);
1659 mutex_unlock(&sit_i->sentry_lock);
1662 int build_segment_manager(struct f2fs_sb_info *sbi)
1664 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1665 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1666 struct f2fs_sm_info *sm_info;
1669 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
1674 sbi->sm_info = sm_info;
1675 INIT_LIST_HEAD(&sm_info->wblist_head);
1676 spin_lock_init(&sm_info->wblist_lock);
1677 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1678 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1679 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
1680 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1681 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1682 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
1683 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
1685 err = build_sit_info(sbi);
1688 err = build_free_segmap(sbi);
1691 err = build_curseg(sbi);
1695 /* reinit free segmap based on SIT */
1696 build_sit_entries(sbi);
1698 init_free_segmap(sbi);
1699 err = build_dirty_segmap(sbi);
1703 init_min_max_mtime(sbi);
1707 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
1708 enum dirty_type dirty_type)
1710 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1712 mutex_lock(&dirty_i->seglist_lock);
1713 kfree(dirty_i->dirty_segmap[dirty_type]);
1714 dirty_i->nr_dirty[dirty_type] = 0;
1715 mutex_unlock(&dirty_i->seglist_lock);
1718 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
1720 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1721 kfree(dirty_i->victim_secmap);
1724 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
1726 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1732 /* discard pre-free/dirty segments list */
1733 for (i = 0; i < NR_DIRTY_TYPE; i++)
1734 discard_dirty_segmap(sbi, i);
1736 destroy_victim_secmap(sbi);
1737 SM_I(sbi)->dirty_info = NULL;
1741 static void destroy_curseg(struct f2fs_sb_info *sbi)
1743 struct curseg_info *array = SM_I(sbi)->curseg_array;
1748 SM_I(sbi)->curseg_array = NULL;
1749 for (i = 0; i < NR_CURSEG_TYPE; i++)
1750 kfree(array[i].sum_blk);
1754 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
1756 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
1759 SM_I(sbi)->free_info = NULL;
1760 kfree(free_i->free_segmap);
1761 kfree(free_i->free_secmap);
1765 static void destroy_sit_info(struct f2fs_sb_info *sbi)
1767 struct sit_info *sit_i = SIT_I(sbi);
1773 if (sit_i->sentries) {
1774 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1775 kfree(sit_i->sentries[start].cur_valid_map);
1776 kfree(sit_i->sentries[start].ckpt_valid_map);
1779 vfree(sit_i->sentries);
1780 vfree(sit_i->sec_entries);
1781 kfree(sit_i->dirty_sentries_bitmap);
1783 SM_I(sbi)->sit_info = NULL;
1784 kfree(sit_i->sit_bitmap);
1788 void destroy_segment_manager(struct f2fs_sb_info *sbi)
1790 struct f2fs_sm_info *sm_info = SM_I(sbi);
1791 destroy_dirty_segmap(sbi);
1792 destroy_curseg(sbi);
1793 destroy_free_segmap(sbi);
1794 destroy_sit_info(sbi);
1795 sbi->sm_info = NULL;