/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)

#define IS_DATASEG(t)	(t <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	(t >= CURSEG_HOT_NODE)
#define IS_CURSEG(sbi, seg)						\
	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
#define IS_CURSEC(sbi, secno)						\
	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  sbi->segs_per_sec))
#define START_BLOCK(sbi, segno)						\
	(SM_I(sbi)->seg0_blkaddr +					\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define MAIN_BASE_BLOCK(sbi)	(SM_I(sbi)->main_blkaddr)
#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)				\
	((blk_addr) - SM_I(sbi)->seg0_blkaddr)
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?		\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno)					\
	((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno)			\
	((segno / sbi->segs_per_sec) / sbi->secs_per_zone)
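/*
 * A worked example with a hypothetical geometry (segs_per_sec = 4,
 * secs_per_zone = 2): segno 37 belongs to secno 37 / 4 = 9, and that
 * section belongs to zone 9 / 2 = 4. The real geometry is read from the
 * superblock and is commonly 1:1:1 on conventional block devices.
 */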
#define GET_SUM_BLOCK(sbi, segno)				\
	((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)
#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	(segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)						\
	(segno / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno)						\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	((TOTAL_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
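/*
 * Example of the SIT math above, assuming 4KB blocks and the 74-byte
 * on-disk f2fs_sit_entry (so SIT_ENTRY_PER_BLOCK == 55): segno 123 is
 * described by SIT block 123 / 55 = 2, and START_SEGNO(123) yields 110,
 * the first segno covered by that SIT block.
 */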
#define f2fs_bitmap_size(nr)					\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
#define TOTAL_SEGS(sbi)		(SM_I(sbi)->main_segments)
#define TOTAL_SECS(sbi)		(sbi->total_sections)
#define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
	(((sector_t)blk_addr) << (sbi)->log_sectors_per_block)
#define SECTOR_TO_BLOCK(sbi, sectors)					\
	(sectors >> (sbi)->log_sectors_per_block)
#define MAX_BIO_BLOCKS(max_hw_blocks)					\
	(min((int)max_hw_blocks, BIO_MAX_PAGES))
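/*
 * With the common 4KB block / 512B sector layout, log_sectors_per_block
 * is 3, so block 100 converts to sector 100 << 3 = 800 and back again.
 * The shift count itself is derived from the superblock at mount time.
 */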
/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};
/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};
/*
 * In the victim_sel_policy->gc_mode, there are two GC, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY
};
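/*
 * Sketch of the two victim costs (the exact arithmetic lives in gc.c and
 * may differ by version): GC_GREEDY simply takes the candidate section
 * with the fewest valid blocks. GC_CB additionally weighs age: with
 * utilization u = valid_blocks * 100 / section_size and age scaled from
 * the section's mtime over [min_mtime, max_mtime], it minimizes roughly
 *	UINT_MAX - (100 * (100 - u) * age) / (100 + u)
 * so old, mostly-invalid sections are reclaimed first.
 */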
/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {
	BG_GC = 0,
	FG_GC
};
/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};
struct seg_entry {
	unsigned short valid_blocks;	/* # of valid blocks */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned short ckpt_valid_blocks;
	unsigned char *ckpt_valid_map;
	unsigned char type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned long long mtime;	/* modification time of the segment */
};
struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};
struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};
struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;	/* # of dirty sentries */
	unsigned int sents_per_block;	/* # of SIT entries per block */
	struct mutex sentry_lock;	/* to protect SIT cache */
	struct seg_entry *sentries;	/* SIT segment-level cache */
	struct sec_entry *sec_entries;	/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
};
struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	rwlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};
/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};
struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};
/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};
/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};
struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};
/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}
static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}
static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}
static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, int section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (section > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}
static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}
static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}
static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	read_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	read_unlock(&free_i->segmap_lock);
	return ret;
}
static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	write_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	write_unlock(&free_i->segmap_lock);
}
static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}
static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	write_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	write_unlock(&free_i->segmap_lock);
}
static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	write_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	write_unlock(&free_i->segmap_lock);
}
static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}
static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}
static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}
static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}
static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	return (prefree_segments(sbi) / sbi->segs_per_sec)
			+ free_sections(sbi) < overprovision_sections(sbi);
}
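/*
 * In other words: once the sections that are free (or about to become
 * free after checkpoint) drop below the over-provisioned share, switch
 * from pure LFS allocation to SSR and refill partially valid segments.
 */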
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	if (unlikely(sbi->por_doing))
		return false;

	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi));
}
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}
/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy.
 * Users can control the policy through sysfs entries.
 * There are six policies with triggering conditions as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages is over min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_DISABLE,
};
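/*
 * Example of switching the policy at runtime (hypothetical device name;
 * the sysfs knobs ipu_policy, min_ipu_util and min_fsync_blocks exist,
 * but their exact encoding depends on the kernel version):
 *	echo 2 > /sys/fs/f2fs/sda1/ipu_policy	(select F2FS_IPU_UTIL)
 *	echo 70 > /sys/fs/f2fs/sda1/min_ipu_util
 */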
static inline bool need_inplace_update(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* IPU can be done only for the user data */
	if (S_ISDIR(inode->i_mode))
		return false;

	switch (SM_I(sbi)->ipu_policy) {
	case F2FS_IPU_FORCE:
		return true;
	case F2FS_IPU_SSR:
		if (need_SSR(sbi))
			return true;
		break;
	case F2FS_IPU_UTIL:
		if (utilization(sbi) > SM_I(sbi)->min_ipu_util)
			return true;
		break;
	case F2FS_IPU_SSR_UTIL:
		if (need_SSR(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
			return true;
		break;
	case F2FS_IPU_FSYNC:
		/* this is only set during fdatasync */
		if (is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
			return true;
		break;
	case F2FS_IPU_DISABLE:
		break;
	}
	return false;
}
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}
static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}
static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}
#ifdef CONFIG_F2FS_CHECK_FS
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
	BUG_ON(segno > end_segno);
}
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
	block_t start_addr = sm_info->seg0_blkaddr;
	block_t end_addr = start_addr + total_blks - 1;
	BUG_ON(blk_addr < start_addr);
	BUG_ON(blk_addr > end_addr);
}
/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	unsigned int end_segno = sm_info->segment_count - 1;
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check segment usage */
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);

	/* check boundary of a given segment number */
	BUG_ON(segno > end_segno);

	/* check bitmap with valid block count */
	do {
		/* walk alternating runs of set/clear bits in the bitmap */
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
}
#else
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int end_segno = SM_I(sbi)->segment_count - 1;

	if (segno > end_segno)
		sbi->need_fsck = true;
}
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
	block_t start_addr = sm_info->seg0_blkaddr;
	block_t end_addr = start_addr + total_blks - 1;

	if (blk_addr < start_addr || blk_addr > end_addr)
		sbi->need_fsck = true;
}
/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	unsigned int end_segno = SM_I(sbi)->segment_count - 1;

	/* check segment usage */
	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
		sbi->need_fsck = true;

	/* check boundary of a given segment number */
	if (segno > end_segno)
		sbi->need_fsck = true;
}
#endif
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}
static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}
static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	if (f2fs_test_bit(block_off, sit_i->sit_bitmap))
		f2fs_clear_bit(block_off, sit_i->sit_bitmap);
	else
		f2fs_set_bit(block_off, sit_i->sit_bitmap);
}
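/*
 * Note on the three helpers above: f2fs keeps two on-disk copies of each
 * SIT block and ping-pongs between them. A set bit in sit_bitmap means
 * the current copy lives in the second half of the SIT area; flipping
 * the bit in set_to_next_sit() redirects the next write to the other
 * copy, so a checkpoint never overwrites the copy it may still need.
 */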
static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}
static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}
static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}
static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}
static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}
static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
}
/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 3 for three types of nodes, and
 * max_bio_blocks for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 3 * sbi->blocks_per_seg;
	else if (type == META)
		return MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	else
		return 0;
}
/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;

	if (type == DATA)
		desired = 4096;
	else if (type == NODE)
		desired = 3 * max_hw_blocks(sbi);
	else
		desired = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}
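/*
 * Hypothetical caller sketch (not part of this header): a writepages
 * path would grab the excess first and give it back afterwards, e.g.
 *
 *	long diff = nr_pages_to_write(sbi, NODE, wbc);
 *	sync_node_pages(sbi, 0, wbc);
 *	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
 *
 * so writeback accounting still sees the pages it originally asked for.
 */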