 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>
#include "trace_gfs2.h"
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)
#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
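/*
 * Illustrative sketch (added here for clarity; not part of the original
 * code, and the helper name is hypothetical): recovering the two-bit
 * state of block number i from its bitmap byte. The same arithmetic
 * appears in gfs2_testbit() and gfs2_setbit() below.
 */
static inline unsigned char example_two_bit_state(unsigned char byte, unsigned int i)
{
	/* Each byte holds GFS2_NBBY (4) blocks of GFS2_BIT_SIZE (2) bits each */
	const unsigned int bit = (i % 4) * 2;

	return (byte >> bit) & 3;	/* 3 == GFS2_BIT_MASK */
}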
static const char valid_change[16] = {
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
			 const struct gfs2_inode *ip, bool nowrap);
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
	unsigned char *byte1, *byte2, *end, cur_state;
	unsigned int buflen = rbm->bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
	byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
		       "new_state=%d\n", rbm->offset, cur_state, new_state);
		printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
		       (unsigned long long)rbm->rgd->rd_addr,
		printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
		       rbm->bi->bi_offset, rbm->bi->bi_len);
		gfs2_consist_rgrpd(rbm->rgd);
	*byte1 ^= (cur_state ^ new_state) << bit;
	if (do_clone && rbm->bi->bi_clone) {
		byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
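/*
 * Worked example (added for illustration): the xor update above flips
 * only the bits that differ. If cur_state == 1 (binary 01) and
 * new_state == 0 (binary 00) at bit position 4, then
 * (cur_state ^ new_state) << bit == 0x10, and xor-ing that into the
 * byte clears that block's pair while leaving the other three block
 * states in the byte untouched.
 */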
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 * Returns: The two bit block state of the requested bit
static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
	const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
	return (*byte >> bit) & GFS2_BIT_MASK;
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for. This gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	tmp = le64_to_cpu(*ptr) ^ search[state];
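/*
 * Worked example (illustrative, byte-sized for brevity): searching
 * 0b01100100 for state 1 (binary 01). search[1] is ...10101010, so the
 * xor gives 0b11001110. Anding that with itself shifted right by one
 * leaves a 1 in an even position only where a full 11 pair was
 * produced, and masking with ...01010101 yields 0b01000100: entries 1
 * and 3 of the byte hold state 1.
 */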
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
	if (blk >= startblk + rs->rs_free)
	if (blk + len - 1 < startblk)
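/*
 * Worked example (illustrative): for a reservation covering blocks
 * 100..109 (startblk == 100, rs_free == 10), rs_cmp(95, 3, rs) is -1
 * (blocks 95..97 end before the reservation), rs_cmp(110, 4, rs) is 1
 * (it starts beyond the last reserved block), and rs_cmp(105, 8, rs)
 * is 0 (blocks 105..112 overlap the reservation).
 */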
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 * Return: the block number (bitmap buffer scope) that was found
static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 mask = 0x5555555555555555ULL;
	/* Mask off bits we don't care about at the start of the search */
	tmp = gfs2_bit_search(ptr, mask, state);
	while (tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
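/*
 * Worked example (illustrative): each byte before ptr accounts for
 * GFS2_NBBY (4) bit-pairs. If ptr ends up 16 bytes past buf and the
 * match sits at bit 6 of that word, then bit == 6 / 2 == 3 and the
 * returned block number is 16 * 4 + 3 == 67.
 */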
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 * Returns: 0 on success, or an error code
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
	u64 rblock = block - rbm->rgd->rd_data0;
	if (WARN_ON_ONCE(rblock > UINT_MAX))
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
	rbm->bi = rbm->rgd->rd_bits;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first bitmap block */
	if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
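/*
 * Worked example (illustrative, assuming a 4KiB block size and the
 * on-disk header sizes from gfs2_ondisk.h): every bitmap block after
 * the first carries (4096 - sizeof(struct gfs2_meta_header)) * GFS2_NBBY
 * == (4096 - 24) * 4 == 16288 block states, which is
 * sd_blocks_per_bitmap. After padding the offset to hide the larger
 * gfs2_rgrp header of the first block, a single divide picks the
 * bitmap index (x) and the remainder is the offset within it.
 */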
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 * Returns: true if a non-free block is encountered
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm);
		if (res != GFS2_BLKST_FREE)
		block = gfs2_rbm_to_block(rbm);
		if (gfs2_rbm_from_block(rbm, block + 1))
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 * Returns: Number of free blocks in the extent
static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u8 *ptr, *start, *end;
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
	n_unaligned = len & 3;
	/* Start is now byte aligned */
	start = rbm.bi->bi_bh->b_data;
	if (rbm.bi->bi_clone)
		start = rbm.bi->bi_clone;
	end = start + rbm.bi->bi_bh->b_size;
	start += rbm.bi->bi_offset;
	BUG_ON(rbm.offset & 3);
	start += (rbm.offset / GFS2_NBBY);
	bytes = min_t(u32, len / GFS2_NBBY, (end - start));
	ptr = memchr_inv(start, 0, bytes);
	chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
	chunk_size *= GFS2_NBBY;
	BUG_ON(len < chunk_size);
	block = gfs2_rbm_to_block(&rbm);
	gfs2_rbm_from_block(&rbm, block + chunk_size);
	n_unaligned = len & 3;
	/* Deal with any bits left over at the end */
	gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 * Returns: The number of bits
static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
		if (((*byte) & 0x0C) == state1)
		if (((*byte) & 0x30) == state2)
		if (((*byte) & 0xC0) == state3)
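/*
 * Worked example (illustrative): counting state 1 in the byte
 * 0b01100101. state1/state2/state3 are the target pattern shifted to
 * the second, third and fourth entry positions, so the four masked
 * compares test each entry independently: entries 0, 1 and 3 match
 * here (0x01, 0x04 and 0x40 after masking), giving a count of 3.
 */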
 * gfs2_rgrp_verify - Verify that a resource group is consistent
void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	memset(count, 0, 4 * sizeof(u32));
	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 * Returns: The resource group, or NULL if not found
struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;
	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		if (blk < cur->rd_addr)
		else if (blk >= cur->rd_data0 + cur->rd_data)
			spin_unlock(&sdp->sd_rindex_spin);
				if (blk < cur->rd_addr)
				if (blk >= cur->rd_data0 + cur->rd_data)
	spin_unlock(&sdp->sd_rindex_spin);
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 * Returns: The first rgrp in the filesystem
struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 * Returns: The next rgrp
struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;
	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
		n = rb_first(&sdp->sd_rindex_tree);
	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
 * @ip: the inode for this reservation
int gfs2_rs_alloc(struct gfs2_inode *ip)
	down_write(&ip->i_rw_mutex);
	ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
	RB_CLEAR_NODE(&ip->i_res->rs_node);
	up_write(&ip->i_rw_mutex);
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
	gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
	struct gfs2_rgrpd *rgd;
	if (!gfs2_rs_active(rs))
	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);
	/* return reserved blocks to the rgrp and the ip */
	BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
	rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
	clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
	smp_mb__after_clear_bit();
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
	struct gfs2_rgrpd *rgd;
	rgd = rs->rs_rbm.rgd;
	spin_lock(&rgd->rd_rsspin);
	__rs_deltree(ip, rs);
	spin_unlock(&rgd->rd_rsspin);
 * gfs2_rs_delete - delete a multi-block reservation
 * @ip: The inode for this reservation
void gfs2_rs_delete(struct gfs2_inode *ip)
	down_write(&ip->i_rw_mutex);
		gfs2_rs_deltree(ip, ip->i_res);
		BUG_ON(ip->i_res->rs_free);
		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
	up_write(&ip->i_rw_mutex);
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
static void return_all_reservations(struct gfs2_rgrpd *rgd)
	struct gfs2_blkreserv *rs;
	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(NULL, rs);
	spin_unlock(&rgd->rd_rsspin);
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;
	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		rb_erase(n, &sdp->sd_rindex_tree);
			spin_lock(&gl->gl_spin);
			gl->gl_object = NULL;
			spin_unlock(&gl->gl_spin);
			gfs2_glock_add_to_lru(gl);
		gfs2_free_clones(rgd);
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
	printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
	printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
	printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 * Calculates bitmap descriptors, one for each block that contains bitmap data
static int compute_bitstructs(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	bytes_left = rgd->rd_bitbytes;
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		/* small rgrp; bitmap stored completely in header block */
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
		} else if (x + 1 == length) {
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
		gfs2_consist_rgrpd(rgd);
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);
		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
static int rgd_insert(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
	/* Figure out where to put new node */
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 * Returns: 0 on success, > 0 on EOF, error code otherwise
static int read_rindex_entry(struct gfs2_inode *ip)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	struct gfs2_rgrpd *rgd;
	if (pos >= i_size_read(&ip->i_inode))
	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));
	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;
	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);
	error = compute_bitstructs(rgd);
	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	rgd->rd_gl->gl_object = rgd;
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 * Returns: 0 on successful update, error code otherwise
static int gfs2_ri_update(struct gfs2_inode *ip)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
		error = read_rindex_entry(ip);
	} while (error == 0);
	sdp->sd_rindex_uptodate = 1;
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 * Returns: 0 on success, error code otherwise
int gfs2_rindex_update(struct gfs2_sbd *sdp)
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int unlock_required = 0;
	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
			gfs2_glock_dq_uninit(&ri_gh);
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
	const struct gfs2_rgrp *str = buf;
	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
	struct gfs2_rgrp *str = buf;
	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
	const struct gfs2_rgrp *str = buf;
	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;
	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually release the bitmaps via gfs2_rgrp_go_unlock().
int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	if (rgd->rd_bits[0].bi_bh != NULL)
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
						GFS2_METATYPE_RG)) {
	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)) {
			gfs2_consist_rgrpd(rgd);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
		bi = rgd->rd_bits + x;
		gfs2_assert_warn(sdp, !bi->bi_clone);
int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);
	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
	return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @gh: The glock holder for the resource group
void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int x, length = rgd->rd_length;
	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
	struct super_block *sb = sdp->sd_vfs;
	struct block_device *bdev = sb->s_bdev;
	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
					   bdev_logical_block_size(sb->s_bdev);
	sector_t nr_sects = 0;
	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
			diff = ~(*clone | (*clone >> 1));
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		blk *= sects_per_blk; /* convert to sectors */
					goto start_new_extent;
				if ((start + nr_sects) != blk) {
					if (nr_sects >= minlen) {
						rv = blkdev_issue_discard(bdev,
						trimmed += nr_sects;
				nr_sects += sects_per_blk;
			blk += sects_per_blk;
	if (nr_sects >= minlen) {
		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
		trimmed += nr_sects;
		*ptrimmed = trimmed;
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
	sdp->sd_args.ar_discard = 0;
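/*
 * Worked example (illustrative): for a bit-pair x, (x | x >> 1) has its
 * low bit set iff the state is not FREE. So if the on-disk byte *orig
 * is 0x00 (all four blocks now free) and the clone byte is 0x45
 * (binary 01000101: entries 0, 1 and 3 were in use), then
 * ~(*orig | *orig >> 1) == 0xff and (*clone | *clone >> 1) == 0x67;
 * masking with 0x55 (one bit per entry) gives 0x45, flagging exactly
 * the three just-freed blocks for discard.
 */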
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 * Returns: 0 on success, otherwise error code
int gfs2_fitrim(struct file *filp, void __user *argp)
	struct inode *inode = filp->f_dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	u64 start, end, minlen;
	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
	if (!capable(CAP_SYS_ADMIN))
	if (!blk_queue_discard(q))
	if (copy_from_user(&r, argp, sizeof(r)))
	ret = gfs2_rindex_update(sdp);
	start = r.start >> bs_shift;
	end = start + (r.len >> bs_shift);
	minlen = max_t(u64, r.minlen,
		       q->limits.discard_granularity) >> bs_shift;
	rgd = gfs2_blk2rgrpd(sdp, start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0);
	    minlen > sdp->sd_max_rg_data ||
	    start > rgd_end->rd_data0 + rgd_end->rd_data)
		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi, minlen,
					gfs2_glock_dq_uninit(&gh);
			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
				gfs2_trans_end(sdp);
		gfs2_glock_dq_uninit(&gh);
		rgd = gfs2_rgrpd_get_next(rgd);
	r.len = trimmed << 9;
	if (copy_to_user(argp, &r, sizeof(r)))
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
static void rs_insert(struct gfs2_inode *ip)
	struct rb_node **newn, *parent = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
	BUG_ON(gfs2_rs_active(rs));
	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(fsblock, rs->rs_free, cur);
			newn = &((*newn)->rb_right);
			newn = &((*newn)->rb_left);
			spin_unlock(&rgd->rd_rsspin);
	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @requested: number of blocks required for this allocation
static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
	struct gfs2_rbm rbm = { .rgd = rgd, };
	struct gfs2_blkreserv *rs = ip->i_res;
	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
	extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
	extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = rgd->rd_last_alloc + rgd->rd_data0;
	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
		rs->rs_free = extlen;
		rs->rs_inum = ip->i_no_addr;
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      const struct gfs2_inode *ip)
	struct gfs2_blkreserv *rs;
	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
	spin_unlock(&rgd->rd_rsspin);
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
	u64 block = gfs2_rbm_to_block(rbm);
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
		extlen = gfs2_free_extlen(rbm, minext);
		nblock = block + extlen;
		if (extlen < minext)
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block)
	ret = gfs2_rbm_from_block(rbm, nblock);
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: The requested extent length (0 for a single block)
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
			 const struct gfs2_inode *ip, bool nowrap)
	struct buffer_head *bh;
	struct gfs2_bitmap *initial_bi;
	int iters = rbm->rgd->rd_length;
	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	if (rbm->offset != 0)
		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
		bh = rbm->bi->bi_bh;
		buffer = bh->b_data + rbm->bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
		rbm->offset = offset;
		initial_bi = rbm->bi;
		ret = gfs2_reservation_check_and_update(rbm, ip, minext);
			n += (rbm->bi - initial_bi);
		if (ret == -E2BIG) {
			n += (rbm->bi - initial_bi);
			goto res_covered_end_of_rgrp;
bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &rbm->bi->bi_flags);
next_bitmap:	/* Find next bitmap in the rgrp */
		index = rbm->bi - rbm->rgd->rd_bits;
		if (index == rbm->rgd->rd_length)
res_covered_end_of_rgrp:
		rbm->bi = &rbm->rgd->rd_bits[index];
		if ((index == 0) && nowrap)
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 * Any unlinked inodes found are queued for eventual disposal via the
 * delete workqueue.
static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
		if (WARN_ON_ONCE(error))
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
		*last_unlinked = block;
		error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
	rgd->rd_flags &= ~GFS2_RDF_CHECK;
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not end up introducing fragmentation by changing resource
 * groups when not actually required.
 * The calculation is fairly simple: we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 * Returns: A boolean verdict on the congestion status
static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_lkstats *st;
	s64 r_dcount, l_dcount;
	s64 r_srttb, l_srttb;
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
	r_srttb = st->stats[GFS2_LKS_SRTTB];
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
	if ((l_dcount < 1) || (r_dcount < 1) || (r_srttb == 0))
	srttb_diff = r_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;
	if (l_dcount < 8 || r_dcount < 8)
	return ((srttb_diff < 0) && (sqr_diff > var));
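/*
 * Illustrative restatement (added; not from the original comments): the
 * rgrp is judged congested when l_srttb > r_srttb, i.e. this node's
 * smoothed blocking round trip time for the rgrp's glock is worse than
 * the per-cpu average across rgrp locks, and additionally
 * (r_srttb - l_srttb)^2 > var, so the gap between the two means exceeds
 * the combined variance margin and is unlikely to be noise.
 */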
 * gfs2_rgrp_used_recently
 * @rs: The block reservation with the rgrp to test
 * @msecs: The time limit in milliseconds
 * Returns: True if the rgrp glock has been used within the time limit
static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
				      rs->rs_rbm.rgd->rd_gl->gl_dstamp));
	return tdiff > (msecs * 1000 * 1000);
static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	get_random_bytes(&skip, sizeof(skip));
	return skip % sdp->sd_rgrps;
static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
	struct gfs2_rgrpd *rgd = *pos;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	rgd = gfs2_rgrpd_get_next(rgd);
		rgd = gfs2_rgrpd_get_first(sdp);
	if (rgd != begin) /* If we didn't wrap */
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @requested: the number of blocks to be reserved
int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	if (sdp->sd_args.ar_rgrplvb)
	if (gfs2_assert_warn(sdp, requested))
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
		flags = 0; /* Yoda: Do or do not. There is no try */
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_used_recently(rs, 1000) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
			if (unlikely(error))
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
		/* Skip unusable resource groups */
		if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
		/* If rgrp has enough free space, use it */
		if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
			ip->i_rgd = rs->rs_rbm.rgd;
		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(ip, rs);
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
		/* Unlock rgrp if required */
			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
		/* Flushing the log may release space */
			gfs2_log_flush(sdp, NULL);
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 * Release a reservation made by gfs2_inplace_reserve().
void gfs2_inplace_release(struct gfs2_inode *ip)
	struct gfs2_blkreserv *rs = ip->i_res;
	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
 * gfs2_get_block_type - Check a block in a RG is of given type
 * @rgd: the resource group holding the block
 * @block: the block number
 * Returns: The block type (GFS2_BLKST_*)
static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
	struct gfs2_rbm rbm = { .rgd = rgd, };
	ret = gfs2_rbm_from_block(&rbm, block);
	WARN_ON_ONCE(ret != 0);
	return gfs2_testbit(&rbm);
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 * Add the bitmap buffer to the transaction.
 * Set the found bits to the requested state to change the blocks' allocation state.
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
	const unsigned int elen = *n;
	block = gfs2_rbm_to_block(rbm);
	gfs2_trans_add_bh(rbm->rgd->rd_gl, rbm->bi->bi_bh, 1);
	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
		ret = gfs2_rbm_from_block(&pos, block);
		if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
		gfs2_trans_add_bh(pos.rgd->rd_gl, pos.bi->bi_bh, 1);
		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 * Returns: Resource group containing the block(s)
static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
	struct gfs2_rbm rbm;
	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
	gfs2_rbm_from_block(&rbm, bstart);
		if (!rbm.bi->bi_clone) {
			rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
			       rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
		gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1);
		gfs2_setbit(&rbm, false, new_state);
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
	spin_unlock(&rgd->rd_rsspin);
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
			__rs_deltree(ip, rs);
	spin_unlock(&rgd->rd_rsspin);
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 * Returns: 0 or error
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	u64 block; /* block, within the file system scope */
	if (gfs2_rs_active(ip->i_res))
		goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
	else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
	gfs2_rbm_from_block(&rbm, goal);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
	if (error == -ENOSPC) {
		gfs2_rbm_from_block(&rbm, goal);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
	/* Since all blocks are reserved in advance, this shouldn't happen */
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
	if (rbm.rgd->rd_free < *nblocks) {
		printk(KERN_WARNING "nblocks=%u\n", *nblocks);
	rbm.rgd->rd_free -= *nblocks;
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
		gfs2_trans_add_unrevoke(sdp, block, 1);
	 * This needs reviewing to see why we cannot do the quota change
	 * at this point in the dinode case.
		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	gfs2_rgrp_error(rbm.rgd);
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
void gfs2_unlink_di(struct inode *inode)
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;
	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;
	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);
	gfs2_statfs_change(sdp, 0, +1, -1);
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;
	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (gfs2_get_block_type(rgd, no_addr) != type)
	gfs2_glock_dq_uninit(&rgd_gh);
 * gfs2_rlist_add - add a RG to a list of RGs
 * @rlist: the list of resource groups
 * Figure out what RG a block belongs to and add that RG to the list
 * FIXME: Don't use NOFAIL
void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
	rgd = gfs2_blk2rgrpd(sdp, block, 1);
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;
		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);
		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *      and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 * FIXME: Don't use NOFAIL
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
	kfree(rlist->rl_rgd);
	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;