2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
4 /* Reiserfs block (de)allocator, bitmap-based. */
6 #include <linux/time.h>
8 #include <linux/errno.h>
9 #include <linux/buffer_head.h>
10 #include <linux/kernel.h>
11 #include <linux/pagemap.h>
12 #include <linux/vmalloc.h>
13 #include <linux/quotaops.h>
14 #include <linux/seq_file.h>
/* NOTE(review): this extract interleaves the original file's line numbers
 * into each line and is missing many lines; code is shown as extracted. */
16 #define PREALLOCATION_SIZE 9
18 /* different reiserfs block allocator options */
/* Allocator policy flags live in REISERFS_SB(s)->s_alloc_options.bits. */
20 #define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits)
/* Bit positions within s_alloc_options.bits, one per allocation policy. */
22 #define _ALLOC_concentrating_formatted_nodes 0
23 #define _ALLOC_displacing_large_files 1
24 #define _ALLOC_displacing_new_packing_localities 2
25 #define _ALLOC_old_hashed_relocation 3
26 #define _ALLOC_new_hashed_relocation 4
27 #define _ALLOC_skip_busy 5
28 #define _ALLOC_displace_based_on_dirid 6
29 #define _ALLOC_hashed_formatted_nodes 7
30 #define _ALLOC_old_way 8
31 #define _ALLOC_hundredth_slices 9
32 #define _ALLOC_dirid_groups 10
33 #define _ALLOC_oid_groups 11
34 #define _ALLOC_packing_groups 12
/* Convenience predicates for the most frequently queried policies. */
36 #define concentrating_formatted_nodes(s) test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s))
37 #define displacing_large_files(s) test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s))
38 #define displacing_new_packing_localities(s) test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s))
/* SET_OPTION logs and sets a policy bit; expects `s` in scope at the
 * expansion site. TEST_OPTION tests a policy bit for super_block `s`. */
40 #define SET_OPTION(optname) \
42 reiserfs_info(s, "block allocator option \"%s\" is set", #optname); \
43 set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \
45 #define TEST_OPTION(optname, s) \
46 test_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s))
/* Translate an fs block number into its bitmap block index (*bmap_nr)
 * and the bit offset within that bitmap block. A bitmap block holds
 * s_blocksize * 8 bits, hence the shift by (s_blocksize_bits + 3).
 * NOTE(review): extract is missing interleaved lines (other parameters
 * and braces not visible here). */
48 static inline void get_bit_address(struct super_block *s,
50 unsigned int *bmap_nr,
53 /* It is in the bitmap block number equal to the block
54 * number divided by the number of bits in a block. */
55 *bmap_nr = block >> (s->s_blocksize_bits + 3);
56 /* Within that bitmap block it is located at bit offset *offset. */
57 *offset = block & ((s->s_blocksize << 3) - 1);
/* Sanity-check that `block` may be freed/reused: rejects block 0, blocks
 * beyond SB_BLOCK_COUNT, bitmap blocks themselves (old-format layouts
 * keep all bitmaps up front, after the superblock), out-of-range bitmap
 * indices, and a bit_value==0 request on the root block (which must stay
 * busy). Presumably returns 1 on success, 0 on error — the return
 * statements are among the lines missing from this extract; confirm
 * against the full source. */
60 int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
62 unsigned int bmap, offset;
63 unsigned int bmap_count = reiserfs_bmap_count(s);
65 if (block == 0 || block >= SB_BLOCK_COUNT(s)) {
66 reiserfs_error(s, "vs-4010",
67 "block number is out of range %lu (%u)",
68 block, SB_BLOCK_COUNT(s));
72 get_bit_address(s, block, &bmap, &offset);
74 /* Old format filesystem? Unlikely, but the bitmaps are all up front so
75 * we need to account for it. */
76 if (unlikely(test_bit(REISERFS_OLD_FORMAT,
77 &(REISERFS_SB(s)->s_properties)))) {
78 b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
80 block <= bmap1 + bmap_count) {
81 reiserfs_error(s, "vs-4019", "bitmap block %lu(%u) "
82 "can't be freed or reused",
88 reiserfs_error(s, "vs-4020", "bitmap block %lu(%u) "
89 "can't be freed or reused",
95 if (bmap >= bmap_count) {
96 reiserfs_error(s, "vs-4030", "bitmap for requested block "
97 "is out of range: block=%lu, bitmap_nr=%u",
102 if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) {
103 reiserfs_error(s, "vs-4050", "this is root block (%u), "
104 "it must be busy", SB_ROOT_BLOCK(s));
111 /* searches in journal structures for a given block number (bmap, off). If block
112 is found in reiserfs journal it suggests next free block candidate to test. */
/* When the block is busy in the journal: if the journal supplied a hint
 * block (tmp), that is used; otherwise *next is set to off + 1 so the
 * caller advances instead of re-testing the same bit. Per-case proc
 * counters are bumped for profiling. NOTE(review): lines missing. */
113 static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
118 if (reiserfs_in_journal(s, bmap, off, 1, &tmp)) {
119 if (tmp) { /* hint supplied */
121 PROC_INFO_INC(s, scan_bitmap.in_journal_hint);
123 (*next) = off + 1; /* inc offset to avoid looping. */
124 PROC_INFO_INC(s, scan_bitmap.in_journal_nohint);
126 PROC_INFO_INC(s, scan_bitmap.retry);
132 /* it searches for a window of zero bits with given minimum and maximum lengths in one bitmap
/* Scans bitmap block `bmap_n` for a run of >= min (up to max) clear bits
 * starting at *beg, bounded by `boundary`. For unformatted-node requests
 * (unfm) every candidate bit is also checked against the journal. On
 * success the bits are set under journal protection, free counters are
 * updated (both bi->free_count and the superblock free-block count) and
 * the bitmap/super buffers are marked dirty. Bits stolen by a racing
 * process while sleeping in prepare_for_journal are handled: either the
 * shortened window is accepted (still >= min) or all set bits are rolled
 * back and the scan restarts. NOTE(review): many lines are missing from
 * this extract (loop braces, returns, the race-recovery jump target). */
134 static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
135 unsigned int bmap_n, int *beg, int boundary,
136 int min, int max, int unfm)
138 struct super_block *s = th->t_super;
139 struct reiserfs_bitmap_info *bi = &SB_AP_BITMAP(s)[bmap_n];
140 struct buffer_head *bh;
144 BUG_ON(!th->t_trans_id);
145 RFALSE(bmap_n >= reiserfs_bmap_count(s), "Bitmap %u is out of "
146 "range (0..%u)", bmap_n, reiserfs_bmap_count(s) - 1);
147 PROC_INFO_INC(s, scan_bitmap.bmap);
148 /* this is unclear and lacks comments, explain how journal bitmaps
149 work here for the reader. Convey a sense of the design here. What
151 /* - I mean `a window of zero bits' as in description of this function - Zam. */
154 reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer "
155 "for bitmap %d", bmap_n);
159 bh = reiserfs_read_bitmap_block(s, bmap_n);
165 if (bi->free_count < min) {
167 return 0; // No free blocks in this bitmap
170 /* search for a first zero bit -- beginning of a window */
171 *beg = reiserfs_find_next_zero_le_bit
172 ((unsigned long *)(bh->b_data), boundary, *beg);
174 if (*beg + min > boundary) { /* search for a zero bit fails or the rest of bitmap block
175 * cannot contain a zero window of minimum size */
180 if (unfm && is_block_in_journal(s, bmap_n, *beg, beg))
182 /* first zero bit found; we check next bits */
183 for (end = *beg + 1;; end++) {
184 if (end >= *beg + max || end >= boundary
185 || reiserfs_test_le_bit(end, bh->b_data)) {
189 /* finding the other end of zero bit window requires looking into journal structures (in
190 * case of searching for free blocks for unformatted nodes) */
191 if (unfm && is_block_in_journal(s, bmap_n, end, &next))
195 /* now (*beg) points to beginning of zero bits window,
196 * (end) points to one bit after the window end */
197 if (end - *beg >= min) { /* it seems we have found window of proper size */
199 reiserfs_prepare_for_journal(s, bh, 1);
200 /* try to set all blocks used checking are they still free */
201 for (i = *beg; i < end; i++) {
202 /* It seems that we should not check in journal again. */
203 if (reiserfs_test_and_set_le_bit
205 /* bit was set by another process
206 * while we slept in prepare_for_journal() */
207 PROC_INFO_INC(s, scan_bitmap.stolen);
208 if (i >= *beg + min) { /* we can continue with smaller set of allocated blocks,
209 * if length of this set is more or equal to `min' */
213 /* otherwise we clear all bit were set ... */
215 reiserfs_clear_le_bit
217 reiserfs_restore_prepared_buffer(s, bh);
219 /* ... and search again in current block from beginning */
223 bi->free_count -= (end - *beg);
224 journal_mark_dirty(th, s, bh);
227 /* free block count calculation */
228 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
230 PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
231 journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
/* Hash a 32-bit id onto a bitmap-block index in
 * [0, reiserfs_bmap_count(s)). The re-check after the modulo looks
 * defensive; the original comment says it can only fire when there is a
 * single bitmap block. NOTE(review): lines missing from this extract. */
240 static int bmap_hash_id(struct super_block *s, u32 id)
242 char *hash_in = NULL;
249 hash_in = (char *)(&id);
250 hash = keyed_hash(hash_in, 4);
251 bm = hash % reiserfs_bmap_count(s);
255 /* this can only be true when SB_BMAP_NR = 1 */
256 if (bm >= reiserfs_bmap_count(s))
262 * hashes the id and then returns > 0 if the block group for the
263 * corresponding hash is full
/* "Full" here appears to mean less than 60% of the bitmap's bits free;
 * an uncached bitmap (free_count == UINT_MAX) is read in first so the
 * decision uses real data — the comment below explains the tradeoff.
 * NOTE(review): lines missing (returns, brace closure not visible). */
265 static inline int block_group_used(struct super_block *s, u32 id)
267 int bm = bmap_hash_id(s, id);
268 struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm];
270 /* If we don't have cached information on this bitmap block, we're
271 * going to have to load it later anyway. Loading it here allows us
272 * to make a better decision. This favors long-term performance gain
273 * with a better on-disk layout vs. a short term gain of skipping the
274 * read and potentially having a bad placement. */
275 if (info->free_count == UINT_MAX) {
276 struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm);
280 if (info->free_count > ((s->s_blocksize << 3) * 60 / 100)) {
287 * the packing is returned in disk byte order
/* Choose the packing locality for a new item in `dir`. With the
 * packing_groups option: keep using the parent's dir_id unless the
 * parent's block group is full (or the parent is locality 1, which
 * reiserfsck expects to stay special), in which case switch to the
 * directory's own objectid. Without the option, the objectid is used.
 * NOTE(review): lines missing from this extract. */
289 __le32 reiserfs_choose_packing(struct inode * dir)
292 if (TEST_OPTION(packing_groups, dir->i_sb)) {
293 u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id);
295 * some versions of reiserfsck expect packing locality 1 to be
298 if (parent_dir == 1 || block_group_used(dir->i_sb, parent_dir))
299 packing = INODE_PKEY(dir)->k_objectid;
301 packing = INODE_PKEY(dir)->k_dir_id;
303 packing = INODE_PKEY(dir)->k_objectid;
307 /* Tries to find contiguous zero bit window (given size) in given region of
308 * bitmap and place new blocks there. Returns number of allocated blocks. */
/* Iterates bitmap blocks from (*start) to `finish`, delegating the
 * per-bitmap scan to scan_bitmap_block(). With skip_busy set and >5% of
 * the fs free, a first pass prefers bitmaps that are >10% free (or the
 * bitmap the file already uses), then falls back to an unrestricted
 * pass. On success *start is updated to the absolute block number of
 * the allocation. NOTE(review): lines missing (return paths, the
 * fallback pass boundaries). */
309 static int scan_bitmap(struct reiserfs_transaction_handle *th,
310 b_blocknr_t * start, b_blocknr_t finish,
311 int min, int max, int unfm, sector_t file_block)
313 int nr_allocated = 0;
314 struct super_block *s = th->t_super;
315 /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
316 * - Hans, it is not a block number - Zam. */
318 unsigned int bm, off;
319 unsigned int end_bm, end_off;
320 unsigned int off_max = s->s_blocksize << 3;
322 BUG_ON(!th->t_trans_id);
323 PROC_INFO_INC(s, scan_bitmap.call);
324 if (SB_FREE_BLOCKS(s) <= 0)
325 return 0; // No point in looking for more free blocks
327 get_bit_address(s, *start, &bm, &off);
328 get_bit_address(s, finish, &end_bm, &end_off);
329 if (bm > reiserfs_bmap_count(s))
331 if (end_bm > reiserfs_bmap_count(s))
332 end_bm = reiserfs_bmap_count(s);
334 /* When the bitmap is more than 10% free, anyone can allocate.
335 * When it's less than 10% free, only files that already use the
336 * bitmap are allowed. Once we pass 80% full, this restriction
339 * We do this so that files that grow later still have space close to
340 * their original allocation. This improves locality, and presumably
341 * performance as a result.
343 * This is only an allocation policy and does not make up for getting a
344 * bad hint. Decent hinting must be implemented for this to work well.
346 if (TEST_OPTION(skip_busy, s)
347 && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s) / 20) {
348 for (; bm < end_bm; bm++, off = 0) {
349 if ((off && (!unfm || (file_block != 0)))
350 || SB_AP_BITMAP(s)[bm].free_count >
351 (s->s_blocksize << 3) / 10)
353 scan_bitmap_block(th, bm, &off, off_max,
358 /* we know from above that start is a reasonable number */
359 get_bit_address(s, *start, &bm, &off);
362 for (; bm < end_bm; bm++, off = 0) {
364 scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
370 scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);
373 *start = bm * off_max + off;
/* Core free path: clears the block's bit in the on-disk bitmap under
 * journal protection, bumps the cached per-bitmap free count, increments
 * the superblock free-block counter, and (for unformatted blocks)
 * returns the block to the quota subsystem — dropping the reiserfs write
 * lock around the quota call to respect lock ordering. An already-clear
 * bit is reported as error vs-4080 rather than double-counted.
 * NOTE(review): lines missing (early-return after the range check,
 * brelse of bmbh, brace closures). */
378 static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
379 struct inode *inode, b_blocknr_t block,
382 struct super_block *s = th->t_super;
383 struct reiserfs_super_block *rs;
384 struct buffer_head *sbh, *bmbh;
385 struct reiserfs_bitmap_info *apbi;
386 unsigned int nr, offset;
388 BUG_ON(!th->t_trans_id);
389 PROC_INFO_INC(s, free_block);
390 rs = SB_DISK_SUPER_BLOCK(s);
391 sbh = SB_BUFFER_WITH_SB(s);
392 apbi = SB_AP_BITMAP(s);
394 get_bit_address(s, block, &nr, &offset);
396 if (nr >= reiserfs_bmap_count(s)) {
397 reiserfs_error(s, "vs-4075", "block %lu is out of range",
402 bmbh = reiserfs_read_bitmap_block(s, nr);
406 reiserfs_prepare_for_journal(s, bmbh, 1);
408 /* clear bit for the given block in bit map */
409 if (!reiserfs_test_and_clear_le_bit(offset, bmbh->b_data)) {
410 reiserfs_error(s, "vs-4080",
411 "block %lu: bit already cleared", block);
413 apbi[nr].free_count++;
414 journal_mark_dirty(th, s, bmbh);
417 reiserfs_prepare_for_journal(s, sbh, 1);
418 /* update super block */
419 set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);
421 journal_mark_dirty(th, s, sbh);
422 if (for_unformatted) {
423 int depth = reiserfs_write_unlock_nested(s);
424 dquot_free_block_nodirty(inode, 1);
425 reiserfs_write_lock_nested(s, depth);
/* Public free entry point: validates the block (is_reusable, fs
 * boundary check against the superblock block count), records the free
 * in the journal via journal_mark_freed() BEFORE clearing the bitmap
 * bit ("mark it before we clear it"), then delegates to
 * _reiserfs_free_block(). NOTE(review): the early-return statements
 * after the checks are among the missing lines. */
429 void reiserfs_free_block(struct reiserfs_transaction_handle *th,
430 struct inode *inode, b_blocknr_t block,
433 struct super_block *s = th->t_super;
435 BUG_ON(!th->t_trans_id);
436 RFALSE(!s, "vs-4061: trying to free block on nonexistent device");
437 if (!is_reusable(s, block, 1))
440 if (block > sb_block_count(REISERFS_SB(s)->s_rs)) {
441 reiserfs_error(th->t_super, "bitmap-4072",
442 "Trying to free block outside file system "
443 "boundaries (%lu > %lu)",
444 block, sb_block_count(REISERFS_SB(s)->s_rs));
447 /* mark it before we clear it, just in case */
448 journal_mark_freed(th, s, block);
449 _reiserfs_free_block(th, inode, block, for_unformatted);
452 /* preallocated blocks don't need to be run through journal_mark_freed */
/* Frees a preallocated block: skips journal_mark_freed() (see comment
 * above) and always passes for_unformatted = 1, since preallocation is
 * only done for unformatted (data) blocks per the rest of this file.
 * NOTE(review): lines missing from this extract. */
453 static void reiserfs_free_prealloc_block(struct reiserfs_transaction_handle *th,
454 struct inode *inode, b_blocknr_t block)
456 BUG_ON(!th->t_trans_id);
458 "vs-4060: trying to free block on nonexistent device");
459 if (!is_reusable(th->t_super, block, 1))
461 _reiserfs_free_block(th, inode, block, 1);
/* Returns all of an inode's remaining preallocated blocks to the free
 * pool, updates the stat data, restores i_prealloc_block to its value
 * on entry (`save`), and unlinks the inode from the journal's prealloc
 * list. NOTE(review): lines missing (#endif for the CHECK block, the
 * condition guarding reiserfs_update_sd, brace closure). */
464 static void __discard_prealloc(struct reiserfs_transaction_handle *th,
465 struct reiserfs_inode_info *ei)
467 unsigned long save = ei->i_prealloc_block;
469 struct inode *inode = &ei->vfs_inode;
471 BUG_ON(!th->t_trans_id);
472 #ifdef CONFIG_REISERFS_CHECK
473 if (ei->i_prealloc_count < 0)
474 reiserfs_error(th->t_super, "zam-4001",
475 "inode has negative prealloc blocks count.");
477 while (ei->i_prealloc_count > 0) {
478 reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
479 ei->i_prealloc_block++;
480 ei->i_prealloc_count--;
484 reiserfs_update_sd(th, inode);
485 ei->i_prealloc_block = save;
486 list_del_init(&(ei->i_prealloc_list));
489 /* FIXME: It should be inline function */
/* Discards one inode's preallocation if it has any. Thin wrapper over
 * __discard_prealloc(). NOTE(review): lines missing from this extract. */
490 void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th,
493 struct reiserfs_inode_info *ei = REISERFS_I(inode);
495 BUG_ON(!th->t_trans_id);
496 if (ei->i_prealloc_count)
497 __discard_prealloc(th, ei);
/* Drains the journal's j_prealloc_list, discarding preallocation for
 * every inode on it. __discard_prealloc() performs the list_del_init,
 * which is what makes the while(!list_empty()) loop terminate. The
 * CONFIG_REISERFS_CHECK branch flags inodes that are on the list with
 * no preallocated blocks. NOTE(review): lines missing (#endif, brace
 * closures). */
500 void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th)
502 struct list_head *plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;
504 BUG_ON(!th->t_trans_id);
505 while (!list_empty(plist)) {
506 struct reiserfs_inode_info *ei;
507 ei = list_entry(plist->next, struct reiserfs_inode_info,
509 #ifdef CONFIG_REISERFS_CHECK
510 if (!ei->i_prealloc_count) {
511 reiserfs_error(th->t_super, "zam-4001",
512 "inode is in prealloc list but has "
513 "no preallocated blocks.");
516 __discard_prealloc(th, ei);
/* Default allocator policy: skip_busy + dirid_groups + packing_groups.
 * show_alloc_options() below treats exactly this combination as "no
 * alloc= option to print". */
520 void reiserfs_init_alloc_options(struct super_block *s)
522 set_bit(_ALLOC_skip_busy, &SB_ALLOC_OPTS(s));
523 set_bit(_ALLOC_dirid_groups, &SB_ALLOC_OPTS(s));
524 set_bit(_ALLOC_packing_groups, &SB_ALLOC_OPTS(s));
527 /* block allocator related options are parsed here */
/* Parses the colon-separated "alloc=" mount-option string. Each token
 * is an option name with an optional "=value" suffix. Clears all bits
 * first, then sets the requested policy bits via SET_OPTION; numeric
 * options (border percentage, large_file_size, preallocmin,
 * preallocsize) are parsed with simple_strtoul with defaults applied
 * when no value is given. Unknown tokens produce zam-4001 warnings.
 * NOTE(review): many lines are missing (continue statements, error
 * returns, several value-parsing fragments). */
528 int reiserfs_parse_alloc_options(struct super_block *s, char *options)
530 char *this_char, *value;
532 REISERFS_SB(s)->s_alloc_options.bits = 0; /* clear default settings */
534 while ((this_char = strsep(&options, ":")) != NULL) {
535 if ((value = strchr(this_char, '=')) != NULL)
538 if (!strcmp(this_char, "concentrating_formatted_nodes")) {
540 SET_OPTION(concentrating_formatted_nodes);
542 && *value) ? simple_strtoul(value, &value,
544 if (temp <= 0 || temp > 100) {
545 REISERFS_SB(s)->s_alloc_options.border = 10;
547 REISERFS_SB(s)->s_alloc_options.border =
552 if (!strcmp(this_char, "displacing_large_files")) {
553 SET_OPTION(displacing_large_files);
554 REISERFS_SB(s)->s_alloc_options.large_file_size =
556 && *value) ? simple_strtoul(value, &value, 0) : 16;
559 if (!strcmp(this_char, "displacing_new_packing_localities")) {
560 SET_OPTION(displacing_new_packing_localities);
564 if (!strcmp(this_char, "old_hashed_relocation")) {
565 SET_OPTION(old_hashed_relocation);
569 if (!strcmp(this_char, "new_hashed_relocation")) {
570 SET_OPTION(new_hashed_relocation);
574 if (!strcmp(this_char, "dirid_groups")) {
575 SET_OPTION(dirid_groups);
578 if (!strcmp(this_char, "oid_groups")) {
579 SET_OPTION(oid_groups);
582 if (!strcmp(this_char, "packing_groups")) {
583 SET_OPTION(packing_groups);
586 if (!strcmp(this_char, "hashed_formatted_nodes")) {
587 SET_OPTION(hashed_formatted_nodes);
591 if (!strcmp(this_char, "skip_busy")) {
592 SET_OPTION(skip_busy);
596 if (!strcmp(this_char, "hundredth_slices")) {
597 SET_OPTION(hundredth_slices);
601 if (!strcmp(this_char, "old_way")) {
606 if (!strcmp(this_char, "displace_based_on_dirid")) {
607 SET_OPTION(displace_based_on_dirid);
611 if (!strcmp(this_char, "preallocmin")) {
612 REISERFS_SB(s)->s_alloc_options.preallocmin =
614 && *value) ? simple_strtoul(value, &value, 0) : 4;
618 if (!strcmp(this_char, "preallocsize")) {
619 REISERFS_SB(s)->s_alloc_options.preallocsize =
621 && *value) ? simple_strtoul(value, &value,
627 reiserfs_warning(s, "zam-4001", "unknown option - %s",
632 reiserfs_info(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
/* print_sep: emits a separator between options in /proc output —
 * body entirely missing from this extract. */
636 static void print_sep(struct seq_file *seq, int *first)
/* show_alloc_options: the show_options back-end for the allocator.
 * Prints nothing when the options equal the defaults set by
 * reiserfs_init_alloc_options() (skip_busy|dirid_groups|packing_groups);
 * otherwise emits ",alloc=" followed by each active policy, with
 * numeric parameters appended when they differ from their parse-time
 * defaults (border 10, large_file_size 16, preallocsize 17,
 * preallocmin 0). Note border is printed back as 100/border,
 * mirroring the division done when it was parsed. NOTE(review): lines
 * missing (returns, brace closures). */
644 void show_alloc_options(struct seq_file *seq, struct super_block *s)
648 if (SB_ALLOC_OPTS(s) == ((1 << _ALLOC_skip_busy) |
649 (1 << _ALLOC_dirid_groups) | (1 << _ALLOC_packing_groups)))
652 seq_puts(seq, ",alloc=");
654 if (TEST_OPTION(concentrating_formatted_nodes, s)) {
655 print_sep(seq, &first);
656 if (REISERFS_SB(s)->s_alloc_options.border != 10) {
657 seq_printf(seq, "concentrating_formatted_nodes=%d",
658 100 / REISERFS_SB(s)->s_alloc_options.border);
660 seq_puts(seq, "concentrating_formatted_nodes");
662 if (TEST_OPTION(displacing_large_files, s)) {
663 print_sep(seq, &first);
664 if (REISERFS_SB(s)->s_alloc_options.large_file_size != 16) {
665 seq_printf(seq, "displacing_large_files=%lu",
666 REISERFS_SB(s)->s_alloc_options.large_file_size);
668 seq_puts(seq, "displacing_large_files");
670 if (TEST_OPTION(displacing_new_packing_localities, s)) {
671 print_sep(seq, &first);
672 seq_puts(seq, "displacing_new_packing_localities");
674 if (TEST_OPTION(old_hashed_relocation, s)) {
675 print_sep(seq, &first);
676 seq_puts(seq, "old_hashed_relocation");
678 if (TEST_OPTION(new_hashed_relocation, s)) {
679 print_sep(seq, &first);
680 seq_puts(seq, "new_hashed_relocation");
682 if (TEST_OPTION(dirid_groups, s)) {
683 print_sep(seq, &first);
684 seq_puts(seq, "dirid_groups");
686 if (TEST_OPTION(oid_groups, s)) {
687 print_sep(seq, &first);
688 seq_puts(seq, "oid_groups");
690 if (TEST_OPTION(packing_groups, s)) {
691 print_sep(seq, &first);
692 seq_puts(seq, "packing_groups");
694 if (TEST_OPTION(hashed_formatted_nodes, s)) {
695 print_sep(seq, &first);
696 seq_puts(seq, "hashed_formatted_nodes");
698 if (TEST_OPTION(skip_busy, s)) {
699 print_sep(seq, &first);
700 seq_puts(seq, "skip_busy");
702 if (TEST_OPTION(hundredth_slices, s)) {
703 print_sep(seq, &first);
704 seq_puts(seq, "hundredth_slices");
706 if (TEST_OPTION(old_way, s)) {
707 print_sep(seq, &first);
708 seq_puts(seq, "old_way");
710 if (TEST_OPTION(displace_based_on_dirid, s)) {
711 print_sep(seq, &first);
712 seq_puts(seq, "displace_based_on_dirid");
714 if (REISERFS_SB(s)->s_alloc_options.preallocmin != 0) {
715 print_sep(seq, &first);
716 seq_printf(seq, "preallocmin=%d",
717 REISERFS_SB(s)->s_alloc_options.preallocmin);
719 if (REISERFS_SB(s)->s_alloc_options.preallocsize != 17) {
720 print_sep(seq, &first);
721 seq_printf(seq, "preallocsize=%d",
722 REISERFS_SB(s)->s_alloc_options.preallocsize);
/* Hint policy: hash a key field onto an offset within [beg, end).
 * Formatted nodes hash k_dir_id; otherwise the inode's dir_id or
 * objectid is chosen based on the displace_based_on_dirid option.
 * NOTE(review): lines missing — control-flow edges not fully visible. */
726 static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
730 if (hint->formatted_node) {
731 hash_in = (char *)&hint->key.k_dir_id;
734 //hint->search_start = hint->beg;
735 hash_in = (char *)&hint->key.k_dir_id;
737 if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
738 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
741 (char *)(&INODE_PKEY(hint->inode)->k_objectid);
745 hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
749 * Relocation based on dirid, hashing them into a given bitmap block
750 * files. Formatted nodes are unaffected, a separate policy covers them
/* Maps the directory id to a bitmap block via bmap_hash_id() and sets
 * search_start to the first block of that group, offset into the group
 * for data (metadata keeps the front portion — see comment below).
 * NOTE(review): lines missing (the condition selecting the metadata
 * offset is not fully visible). */
752 static void dirid_groups(reiserfs_blocknr_hint_t * hint)
757 struct super_block *sb = hint->th->t_super;
760 dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
761 else if (hint->formatted_node)
762 dirid = hint->key.k_dir_id;
765 bm = bmap_hash_id(sb, dirid);
766 hash = bm * (sb->s_blocksize << 3);
767 /* give a portion of the block group to metadata */
769 hash += sb->s_blocksize / 2;
770 hint->search_start = hash;
775 * Relocation based on oid, hashing them into a given bitmap block
776 * files. Formatted nodes are unaffected, a separate policy covers them
/* Like dirid_groups() but keyed on the objectid; special-cases the root
 * directory's immediate tree (see comment below) by pinning the hash to
 * the start of the second block group. NOTE(review): lines missing —
 * the dirid comparison guarding the special case is not visible. */
778 static void oid_groups(reiserfs_blocknr_hint_t * hint)
786 dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
788 /* keep the root dir and it's first set of subdirs close to
789 * the start of the disk
792 hash = (hint->inode->i_sb->s_blocksize << 3);
794 oid = le32_to_cpu(INODE_PKEY(hint->inode)->k_objectid);
795 bm = bmap_hash_id(hint->inode->i_sb, oid);
796 hash = bm * (hint->inode->i_sb->s_blocksize << 3);
798 hint->search_start = hash;
802 /* returns 1 if it finds an indirect item and gets valid hint info
803 * from it, otherwise 0
/* Derives a search_start hint from the node's left neighbor in tree
 * order: defaults to the path's last buffer's block number, and for
 * unformatted nodes walks the indirect item leftward to the first
 * non-hole unformatted pointer. Callers without a path rely on the
 * search_start they supplied. NOTE(review): lines missing (return
 * statements, loop decrement, path assignment). */
805 static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
807 struct treepath *path;
808 struct buffer_head *bh;
809 struct item_head *ih;
814 if (!hint->path) /* reiserfs code can call this function w/o pointer to path
815 * structure supplied; then we rely on supplied search_start */
819 bh = get_last_bh(path);
820 RFALSE(!bh, "green-4002: Illegal path specified to get_left_neighbor");
822 pos_in_item = path->pos_in_item;
823 item = get_item(path);
825 hint->search_start = bh->b_blocknr;
827 if (!hint->formatted_node && is_indirect_le_ih(ih)) {
828 /* for indirect item: go to left and look for the first non-hole entry
829 in the indirect item */
830 if (pos_in_item == I_UNFM_NUM(ih))
832 // pos_in_item = I_UNFM_NUM (ih) - 1;
833 while (pos_in_item >= 0) {
834 int t = get_block_num(item, pos_in_item);
836 hint->search_start = t;
844 /* does result value fit into specified region? */
848 /* should be, if formatted node, then try to put on first part of the device
849 specified as number of percent with mount option device, else try to put
850 on last of device. This is not to say it is good code to do so,
851 but the effect should be measured. */
/* Splits the device at border = block_count / s_alloc_options.border:
 * formatted nodes are confined below the border; the else branch (not
 * visible here) presumably moves unformatted allocation above it. */
852 static inline void set_border_in_hint(struct super_block *s,
853 reiserfs_blocknr_hint_t * hint)
856 SB_BLOCK_COUNT(s) / REISERFS_SB(s)->s_alloc_options.border;
858 if (hint->formatted_node)
859 hint->end = border - 1;
/* Hint policy for large files: hash the inode's dir_id (when
 * displace_based_on_dirid is set) or objectid into [beg, end).
 * NOTE(review): the assignments receiving these expressions are among
 * the missing lines. */
864 static inline void displace_large_file(reiserfs_blocknr_hint_t * hint)
866 if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
869 keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_dir_id),
870 4) % (hint->end - hint->beg);
874 keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_objectid),
875 4) % (hint->end - hint->beg);
/* Hint policy for formatted nodes: choose a hash input (key dir_id,
 * inode dir_id, or inode objectid depending on options) and map it into
 * [beg, end). NOTE(review): the first condition selecting the key's
 * dir_id is among the missing lines. */
878 static inline void hash_formatted_node(reiserfs_blocknr_hint_t * hint)
883 hash_in = (char *)&hint->key.k_dir_id;
884 else if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
885 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
887 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);
890 hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
/* True exactly when this allocation crosses the configured
 * large_file_size threshold (hint->block is presumably the file-relative
 * block index — confirm against callers). */
894 this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t *
897 return hint->block ==
898 REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size;
901 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
/* One-shot hint for a newly created packing locality: clears the
 * transaction's displace_new_blocks flag and hashes the key's objectid
 * into [beg, end). NOTE(review): assignment target of the hash
 * expression is among the missing lines. */
902 static inline void displace_new_packing_locality(reiserfs_blocknr_hint_t * hint)
904 struct in_core_key *key = &hint->key;
906 hint->th->displace_new_blocks = 0;
908 hint->beg + keyed_hash((char *)(&key->k_objectid),
909 4) % (hint->end - hint->beg);
/* Legacy unformatted-node policy: hash the inode's dir_id to a border
 * inside [beg, end-1) and push search_start forward to it (never
 * backward). Bails out for formatted nodes or missing inode.
 * NOTE(review): lines missing (returns, border declaration). */
913 static inline int old_hashed_relocation(reiserfs_blocknr_hint_t * hint)
918 if (hint->formatted_node || hint->inode == NULL) {
922 hash_in = le32_to_cpu((INODE_PKEY(hint->inode))->k_dir_id);
924 hint->beg + (u32) keyed_hash(((char *)(&hash_in)),
925 4) % (hint->end - hint->beg - 1);
926 if (border > hint->search_start)
927 hint->search_start = border;
/* Oldest policy: derive the border directly from dir_id modulo the
 * region size (no keyed hash) and advance search_start to it.
 * NOTE(review): lines missing (returns, border declaration). */
932 static inline int old_way(reiserfs_blocknr_hint_t * hint)
936 if (hint->formatted_node || hint->inode == NULL) {
942 le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id) % (hint->end -
944 if (border > hint->search_start)
945 hint->search_start = border;
/* Divides the device into 100 slices and hashes dir_id to one of them;
 * search_start is moved into the slice only when it currently lies
 * outside it. */
950 static inline void hundredth_slices(reiserfs_blocknr_hint_t * hint)
952 struct in_core_key *key = &hint->key;
953 b_blocknr_t slice_start;
956 (keyed_hash((char *)(&key->k_dir_id), 4) % 100) * (hint->end / 100);
957 if (slice_start > hint->search_start
958 || slice_start + (hint->end / 100) <= hint->search_start) {
959 hint->search_start = slice_start;
/* Central hint dispatcher: applies the mount-selected allocation
 * policies in priority order to compute hint->search_start within
 * [hint->beg, hint->end]. Order visible here: border confinement for
 * formatted nodes, new-packing-locality displacement (compile-time
 * option), large-file displacement, hashed formatted nodes, left
 * neighbor from the tree path, the legacy old_way policies,
 * hundredth_slices, then the hashed-relocation / dirid / oid group
 * policies gated on whether an unformatted-neighbor hint was found.
 * NOTE(review): many lines missing (returns between stages, brace
 * closures, the dirid/oid group calls themselves). */
963 static void determine_search_start(reiserfs_blocknr_hint_t * hint,
966 struct super_block *s = hint->th->t_super;
970 hint->end = SB_BLOCK_COUNT(s) - 1;
972 /* This is former border algorithm. Now with tunable border offset */
973 if (concentrating_formatted_nodes(s))
974 set_border_in_hint(s, hint);
976 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
977 /* whenever we create a new directory, we displace it. At first we will
978 hash for location, later we might look for a moderately empty place for
980 if (displacing_new_packing_localities(s)
981 && hint->th->displace_new_blocks) {
982 displace_new_packing_locality(hint);
984 /* we do not continue determine_search_start,
985 * if new packing locality is being displaced */
990 /* all persons should feel encouraged to add more special cases here and
993 if (displacing_large_files(s) && !hint->formatted_node
994 && this_blocknr_allocation_would_make_it_a_large_file(hint)) {
995 displace_large_file(hint);
999 /* if none of our special cases is relevant, use the left neighbor in the
1000 tree order of the new node we are allocating for */
1001 if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
1002 hash_formatted_node(hint);
1006 unfm_hint = get_left_neighbor(hint);
1008 /* Mimic old block allocator behaviour, that is if VFS allowed for preallocation,
1009 new blocks are displaced based on directory ID. Also, if suggested search_start
1010 is less than last preallocated block, we start searching from it, assuming that
1011 HDD dataflow is faster in forward direction */
1012 if (TEST_OPTION(old_way, s)) {
1013 if (!hint->formatted_node) {
1014 if (!reiserfs_hashed_relocation(s))
1016 else if (!reiserfs_no_unhashed_relocation(s))
1017 old_hashed_relocation(hint);
1020 && hint->search_start <
1021 REISERFS_I(hint->inode)->i_prealloc_block)
1022 hint->search_start =
1023 REISERFS_I(hint->inode)->i_prealloc_block;
1028 /* This is an approach proposed by Hans */
1029 if (TEST_OPTION(hundredth_slices, s)
1030 && !(displacing_large_files(s) && !hint->formatted_node)) {
1031 hundredth_slices(hint);
1035 /* old_hashed_relocation only works on unformatted */
1036 if (!unfm_hint && !hint->formatted_node &&
1037 TEST_OPTION(old_hashed_relocation, s)) {
1038 old_hashed_relocation(hint);
1040 /* new_hashed_relocation works with both formatted/unformatted nodes */
1041 if ((!unfm_hint || hint->formatted_node) &&
1042 TEST_OPTION(new_hashed_relocation, s)) {
1043 new_hashed_relocation(hint);
1045 /* dirid grouping works only on unformatted nodes */
1046 if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
1049 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
1050 if (hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
1055 /* oid grouping works only on unformatted nodes */
1056 if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups, s)) {
/* Sets hint->prealloc_size: zero by default, and the mount-configured
 * preallocsize only for unformatted allocations on regular files whose
 * size has already reached preallocmin blocks. NOTE(review): the
 * closing assignment and return are among the missing lines. */
1062 static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
1064 /* make minimum size a mount option and benchmark both ways */
1065 /* we preallocate blocks only for regular files, specific size */
1066 /* benchmark preallocating always and see what happens */
1068 hint->prealloc_size = 0;
1070 if (!hint->formatted_node && hint->preallocate) {
1071 if (S_ISREG(hint->inode->i_mode)
1072 && hint->inode->i_size >=
1073 REISERFS_SB(hint->th->t_super)->s_alloc_options.
1074 preallocmin * hint->inode->i_sb->s_blocksize)
1075 hint->prealloc_size =
1076 REISERFS_SB(hint->th->t_super)->s_alloc_options.
1082 /* XXX I know it could be merged with upper-level function;
1083 but may be result function would be too complex. */
/* Repeatedly calls scan_bitmap() between `start` and `finish` (never
 * wrapping past `finish`) until `amount_needed` blocks are gathered.
 * The request is padded by prealloc_size; surplus blocks beyond
 * amount_needed become the inode's preallocation (i_prealloc_block /
 * i_prealloc_count) and the inode is linked onto the journal's prealloc
 * list. Returns the number of blocks actually placed in new_blocknrs.
 * NOTE(review): lines missing (rest/nr_allocated decrements, brace
 * closures). */
1084 static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
1085 b_blocknr_t * new_blocknrs,
1087 b_blocknr_t finish, int min,
1091 int rest = amount_needed;
1094 while (rest > 0 && start <= finish) {
1095 nr_allocated = scan_bitmap(hint->th, &start, finish, min,
1096 rest + prealloc_size,
1097 !hint->formatted_node, hint->block);
1099 if (nr_allocated == 0) /* no new blocks allocated, return */
1102 /* fill free_blocknrs array first */
1103 while (rest > 0 && nr_allocated > 0) {
1104 *new_blocknrs++ = start++;
1109 /* do we have something to fill prealloc. array also ? */
1110 if (nr_allocated > 0) {
1111 /* it means prealloc_size was greater that 0 and we do preallocation */
1112 list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
1113 &SB_JOURNAL(hint->th->t_super)->
1115 REISERFS_I(hint->inode)->i_prealloc_block = start;
1116 REISERFS_I(hint->inode)->i_prealloc_count =
1122 return (amount_needed - rest);
/* Drives the full allocation: charges quota up front for unformatted
 * requests (dropping the reiserfs write lock around dquot calls;
 * prealloc quota failure just disables preallocation rather than
 * failing), then tries up to three search passes — search_start→end of
 * disk, beg→search_start, then 0→beg — via
 * allocate_without_wrapping_disk(). On total failure it refunds quota,
 * frees any partially allocated blocks, and returns NO_DISK_SPACE;
 * QUOTA_EXCEEDED is returned when the initial charge fails. After
 * success, quota charged for preallocation that never materialized is
 * refunded. NOTE(review): many lines missing (the pass counter and
 * do/while structure, several quota refund operands). */
1125 static inline int blocknrs_and_prealloc_arrays_from_search_start
1126 (reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs,
1127 int amount_needed) {
1128 struct super_block *s = hint->th->t_super;
1129 b_blocknr_t start = hint->search_start;
1130 b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
1132 int nr_allocated = 0;
1135 determine_prealloc_size(hint);
1136 if (!hint->formatted_node) {
1138 #ifdef REISERQUOTA_DEBUG
1139 reiserfs_debug(s, REISERFS_DEBUG_CODE,
1140 "reiserquota: allocating %d blocks id=%u",
1141 amount_needed, hint->inode->i_uid);
1143 depth = reiserfs_write_unlock_nested(s);
1145 dquot_alloc_block_nodirty(hint->inode, amount_needed);
1146 if (quota_ret) { /* Quota exceeded? */
1147 reiserfs_write_lock_nested(s, depth);
1148 return QUOTA_EXCEEDED;
1150 if (hint->preallocate && hint->prealloc_size) {
1151 #ifdef REISERQUOTA_DEBUG
1152 reiserfs_debug(s, REISERFS_DEBUG_CODE,
1153 "reiserquota: allocating (prealloc) %d blocks id=%u",
1154 hint->prealloc_size, hint->inode->i_uid);
1156 quota_ret = dquot_prealloc_block_nodirty(hint->inode,
1157 hint->prealloc_size);
1159 hint->preallocate = hint->prealloc_size = 0;
1161 /* for unformatted nodes, force large allocations */
1162 reiserfs_write_lock_nested(s, depth);
1167 case 0: /* Search from hint->search_start to end of disk */
1168 start = hint->search_start;
1169 finish = SB_BLOCK_COUNT(s) - 1;
1171 case 1: /* Search from hint->beg to hint->search_start */
1173 finish = hint->search_start;
1175 case 2: /* Last chance: Search from 0 to hint->beg */
1179 default: /* We've tried searching everywhere, not enough space */
1180 /* Free the blocks */
1181 if (!hint->formatted_node) {
1182 #ifdef REISERQUOTA_DEBUG
1183 reiserfs_debug(s, REISERFS_DEBUG_CODE,
1184 "reiserquota: freeing (nospace) %d blocks id=%u",
1186 hint->prealloc_size -
1188 hint->inode->i_uid);
1190 /* Free not allocated blocks */
1191 depth = reiserfs_write_unlock_nested(s);
1192 dquot_free_block_nodirty(hint->inode,
1193 amount_needed + hint->prealloc_size -
1195 reiserfs_write_lock_nested(s, depth);
1197 while (nr_allocated--)
1198 reiserfs_free_block(hint->th, hint->inode,
1199 new_blocknrs[nr_allocated],
1200 !hint->formatted_node);
1202 return NO_DISK_SPACE;
1204 } while ((nr_allocated += allocate_without_wrapping_disk(hint,
1214 if (!hint->formatted_node &&
1215 amount_needed + hint->prealloc_size >
1216 nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) {
1217 /* Some of preallocation blocks were not allocated */
1218 #ifdef REISERQUOTA_DEBUG
1219 reiserfs_debug(s, REISERFS_DEBUG_CODE,
1220 "reiserquota: freeing (failed prealloc) %d blocks id=%u",
1221 amount_needed + hint->prealloc_size -
1223 REISERFS_I(hint->inode)->i_prealloc_count,
1224 hint->inode->i_uid);
1227 depth = reiserfs_write_unlock_nested(s);
1228 dquot_free_block_nodirty(hint->inode, amount_needed +
1229 hint->prealloc_size - nr_allocated -
1230 REISERFS_I(hint->inode)->
1232 reiserfs_write_lock_nested(s, depth);
1238 /* grab new blocknrs from preallocated list */
1239 /* return amount still needed after using them */
/* Consumes the inode's preallocated run (consecutive block numbers from
 * i_prealloc_block) into new_blocknrs until either the request or the
 * preallocation is exhausted; removes the inode from the prealloc list
 * once drained. NOTE(review): lines missing (the amount_needed
 * decrement inside the loop, brace closures). */
1240 static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint,
1241 b_blocknr_t * new_blocknrs,
1244 struct inode *inode = hint->inode;
1246 if (REISERFS_I(inode)->i_prealloc_count > 0) {
1247 while (amount_needed) {
1249 *new_blocknrs++ = REISERFS_I(inode)->i_prealloc_block++;
1250 REISERFS_I(inode)->i_prealloc_count--;
1254 if (REISERFS_I(inode)->i_prealloc_count <= 0) {
1255 list_del(&REISERFS_I(inode)->i_prealloc_list);
1260 /* return amount still needed after using preallocated blocks */
1261 return amount_needed;
1264 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed, int reserved_by_us /* Amount of blocks we have
1265 already reserved */ )
1267 int initial_amount_needed = amount_needed;
1269 struct super_block *s = hint->th->t_super;
1271 /* Check if there is enough space, taking into account reserved space */
1272 if (SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
1273 amount_needed - reserved_by_us)
1274 return NO_DISK_SPACE;
1275 /* should this be if !hint->inode && hint->preallocate? */
1276 /* do you mean hint->formatted_node can be removed ? - Zam */
1277 /* hint->formatted_node cannot be removed because we try to access
1278 inode information here, and there is often no inode assotiated with
1279 metadata allocations - green */
1281 if (!hint->formatted_node && hint->preallocate) {
1282 amount_needed = use_preallocated_list_if_available
1283 (hint, new_blocknrs, amount_needed);
1284 if (amount_needed == 0) /* all blocknrs we need we got from
1287 new_blocknrs += (initial_amount_needed - amount_needed);
1290 /* find search start and save it in hint structure */
1291 determine_search_start(hint, amount_needed);
1292 if (hint->search_start >= SB_BLOCK_COUNT(s))
1293 hint->search_start = SB_BLOCK_COUNT(s) - 1;
1295 /* allocation itself; fill new_blocknrs and preallocation arrays */
1296 ret = blocknrs_and_prealloc_arrays_from_search_start
1297 (hint, new_blocknrs, amount_needed);
1299 /* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
1300 * need to return blocks back to prealloc. list or just free them. -- Zam (I chose second
1303 if (ret != CARRY_ON) {
1304 while (amount_needed++ < initial_amount_needed) {
1305 reiserfs_free_block(hint->th, hint->inode,
1306 *(--new_blocknrs), 1);
1312 void reiserfs_cache_bitmap_metadata(struct super_block *sb,
1313 struct buffer_head *bh,
1314 struct reiserfs_bitmap_info *info)
1316 unsigned long *cur = (unsigned long *)(bh->b_data + bh->b_size);
1318 /* The first bit must ALWAYS be 1 */
1319 if (!reiserfs_test_le_bit(0, (unsigned long *)bh->b_data))
1320 reiserfs_error(sb, "reiserfs-2025", "bitmap block %lu is "
1321 "corrupted: first bit must be 1", bh->b_blocknr);
1323 info->free_count = 0;
1325 while (--cur >= (unsigned long *)bh->b_data) {
1326 /* 0 and ~0 are special, we can optimize for them */
1328 info->free_count += BITS_PER_LONG;
1329 else if (*cur != ~0L) /* A mix, investigate */
1330 info->free_count += BITS_PER_LONG - hweight_long(*cur);
1334 struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
1335 unsigned int bitmap)
1337 b_blocknr_t block = (sb->s_blocksize << 3) * bitmap;
1338 struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
1339 struct buffer_head *bh;
1341 /* Way old format filesystems had the bitmaps packed up front.
1342 * I doubt there are any of these left, but just in case... */
1343 if (unlikely(test_bit(REISERFS_OLD_FORMAT,
1344 &(REISERFS_SB(sb)->s_properties))))
1345 block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
1346 else if (bitmap == 0)
1347 block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
1349 bh = sb_bread(sb, block);
1351 reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
1352 "reading failed", __func__, block);
1354 if (buffer_locked(bh)) {
1356 PROC_INFO_INC(sb, scan_bitmap.wait);
1357 depth = reiserfs_write_unlock_nested(sb);
1358 __wait_on_buffer(bh);
1359 reiserfs_write_lock_nested(sb, depth);
1361 BUG_ON(!buffer_uptodate(bh));
1362 BUG_ON(atomic_read(&bh->b_count) == 0);
1364 if (info->free_count == UINT_MAX)
1365 reiserfs_cache_bitmap_metadata(sb, bh, info);
1371 int reiserfs_init_bitmap_cache(struct super_block *sb)
1373 struct reiserfs_bitmap_info *bitmap;
1374 unsigned int bmap_nr = reiserfs_bmap_count(sb);
1376 bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
1380 memset(bitmap, 0xff, sizeof(*bitmap) * bmap_nr);
1382 SB_AP_BITMAP(sb) = bitmap;
1387 void reiserfs_free_bitmap_cache(struct super_block *sb)
1389 if (SB_AP_BITMAP(sb)) {
1390 vfree(SB_AP_BITMAP(sb));
1391 SB_AP_BITMAP(sb) = NULL;