/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item_desc *lidp;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		blip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fspriv.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_fspriv;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);

	/*
	 * Initialize b_transp so we can find this buffer with
	 * xfs_trans_buf_item_match() in xfs_trans_get_buf() and friends.
	 */
	bp->b_transp = tp;
}

void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_fspriv);
}

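/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that has already locked a buffer outside of transaction context can
 * hand ownership of it to a transaction.  "tp", "mp", "blkno" and
 * "nblks" are assumed to be set up by the (hypothetical) caller:
 *
 *	bp = xfs_buf_get(mp->m_ddev_targp, blkno, nblks, 0);
 *	if (bp)
 *		xfs_trans_bjoin(tp, bp);
 *
 * After the join, the buffer is released by transaction commit or
 * cancel rather than by an explicit xfs_buf_relse().
 */
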
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			XFS_BUF_DONE(bp);
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return bp;
	}

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp == NULL)
		return NULL;

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_fspriv);
	return bp;
}

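/*
 * Usage sketch (illustrative, not part of the original file):
 * xfs_trans_get_buf() does not read from disk, so it suits newly
 * allocated blocks whose contents will be fully initialized and then
 * logged.  "tp", "mp" and "fsbno" are assumed from a hypothetical
 * caller:
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
 *			       XFS_FSB_TO_DADDR(mp, fsbno),
 *			       XFS_FSB_TO_BB(mp, 1), 0);
 *	(initialize the new block's contents)
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */
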
/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use xfs_trans_buf_item_match() here, because the
 * superblock buffer is a private buffer which we keep a pointer to in
 * the mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t	*tp,
		struct xfs_mount *mp,
		int		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL)
		return xfs_getsb(mp, flags);

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (bp->b_transp == tp) {
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return bp;
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_fspriv);
	return bp;
}

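/*
 * Note (editorial): thanks to the recursion count, a transaction may
 * take the superblock buffer more than once; recursive takes only bump
 * bli_recur and the matching releases unwind it:
 *
 *	bp = xfs_trans_getsb(tp, mp, 0);	(locks mp->m_sb_bp)
 *	bp = xfs_trans_getsb(tp, mp, 0);	(bli_recur++)
 *	xfs_trans_brelse(tp, bp);		(bli_recur--)
 */
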
#ifdef DEBUG
xfs_buftarg_t	*xfs_error_target;
int		xfs_do_error;
int		xfs_req_num;
int		xfs_error_mod = 33;
#endif

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;
	int			error;

	if (tp == NULL) {
		bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
		if (!bp)
			return (flags & XBF_TRYLOCK) ?
					-EAGAIN : -ENOMEM;

		if (bp->b_error) {
			error = bp->b_error;
			xfs_buf_ioerror_alert(bp, __func__);
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);

			/* bad CRC means corrupted metadata */
			if (error == -EFSBADCRC)
				error = -EFSCORRUPTED;
			return error;
		}
#ifdef DEBUG
		if (xfs_do_error) {
			if (xfs_error_target == target) {
				if (((xfs_req_num++) % xfs_error_mod) == 0) {
					xfs_buf_relse(bp);
					xfs_debug(mp, "Returning error!");
					return -EIO;
				}
			}
		}
#endif
		if (XFS_FORCED_SHUTDOWN(mp))
			goto shutdown_abort;
		*bpp = bp;
		return 0;
	}

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_fspriv != NULL);
		ASSERT(!bp->b_error);
		if (!(XFS_BUF_ISDONE(bp))) {
			trace_xfs_trans_read_buf_io(bp, _RET_IP_);
			ASSERT(!XFS_BUF_ISASYNC(bp));
			ASSERT(bp->b_iodone == NULL);
			XFS_BUF_READ(bp);
			bp->b_ops = ops;

			/*
			 * XXX(hch): clean up the error handling here to be less
			 * of a mess..
			 */
			if (XFS_FORCED_SHUTDOWN(mp)) {
				trace_xfs_bdstrat_shut(bp, _RET_IP_);
				bp->b_flags &= ~(XBF_READ | XBF_DONE);
				xfs_buf_ioerror(bp, -EIO);
				xfs_buf_stale(bp);
				xfs_buf_relse(bp);
				return -EIO;
			}

			xfs_buf_iorequest(bp);
			error = xfs_buf_iowait(bp);
			if (error) {
				xfs_buf_ioerror_alert(bp, __func__);
				xfs_buf_relse(bp);
				/*
				 * We can gracefully recover from most read
				 * errors. Ones we can't are those that happen
				 * after the transaction's already dirty.
				 */
				if (tp->t_flags & XFS_TRANS_DIRTY)
					xfs_force_shutdown(tp->t_mountp,
							SHUTDOWN_META_IO_ERROR);
				/* bad CRC means corrupted metadata */
				if (error == -EFSBADCRC)
					error = -EFSCORRUPTED;
				return error;
			}
		}
		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either.  Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			*bpp = NULL;
			return -EIO;
		}

		bip = bp->b_fspriv;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
	if (bp == NULL) {
		*bpp = NULL;
		return (flags & XBF_TRYLOCK) ?
					0 : -ENOMEM;
	}
	if (bp->b_error) {
		error = bp->b_error;
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		xfs_buf_ioerror_alert(bp, __func__);
		if (tp->t_flags & XFS_TRANS_DIRTY)
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}
#ifdef DEBUG
	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
		if (xfs_error_target == target) {
			if (((xfs_req_num++) % xfs_error_mod) == 0) {
				xfs_force_shutdown(tp->t_mountp,
						   SHUTDOWN_META_IO_ERROR);
				xfs_buf_relse(bp);
				xfs_debug(mp, "Returning trans error!");
				return -EIO;
			}
		}
	}
#endif
	if (XFS_FORCED_SHUTDOWN(mp))
		goto shutdown_abort;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_read_buf(bp->b_fspriv);

	*bpp = bp;
	return 0;

shutdown_abort:
	trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
	xfs_buf_relse(bp);
	*bpp = NULL;
	return -EIO;
}

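/*
 * Usage sketch (illustrative, not part of the original file): read one
 * block of metadata with a verifier attached, from a hypothetical
 * caller holding "tp", "mp", "fsbno" and a pointer "ops" to the
 * appropriate xfs_buf_ops:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 *				   XFS_FSB_TO_DADDR(mp, fsbno),
 *				   XFS_FSB_TO_BB(mp, 1), 0, &bp, ops);
 *	if (error)
 *		return error;
 *
 * On success the buffer is locked and attached to the transaction.
 */
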
/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction.  If the buffer is modified
 * within this transaction, still decrement the recursion count, but do
 * not release the buffer even if the count goes to 0.  If the buffer is
 * not modified within the transaction, decrement the recursion count
 * and release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t	*tp,
		 xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(bp->b_transp == NULL);
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(bp->b_transp == tp);
	bip = bp->b_fspriv;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_brelse(bip);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
		return;

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_del_item(&bip->bli_item);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool.  Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_transp to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
/***
		ASSERT(bp->b_pincount == 0);
***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
	}

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}

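/*
 * Usage sketch (illustrative, not part of the original file): a
 * read-and-inspect pattern where the buffer turns out not to be needed,
 * so it is released early instead of staying locked until commit
 * ("interesting()" is hypothetical):
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, daddr,
 *				   nbblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	if (!interesting(bp)) {
 *		xfs_trans_brelse(tp, bp);
 *		return 0;
 *	}
 */
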
/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_unlock() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t	*tp,
			xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}

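/*
 * Usage sketch (illustrative, not part of the original file): the
 * classic use of xfs_trans_bhold() is keeping a buffer locked across a
 * transaction commit so a follow-up transaction can pick it up:
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_trans_commit(tp, 0);
 *	(bp is still locked; join it to the next transaction with
 *	 xfs_trans_bjoin() or drop it with xfs_buf_relse())
 */
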
/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t	*tp,
		  xfs_buf_t	*bp,
		  uint		first,
		  uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	XFS_BUF_DONE(bp);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bp->b_iodone = xfs_buf_iodone_callbacks;
	bip->bli_item.li_cb = xfs_buf_iodone;

	trace_xfs_trans_log_buf(bip);

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(XFS_BUF_ISSTALE(bp));
		XFS_BUF_UNSTALE(bp);
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;

	/*
	 * If we have an ordered buffer we are not logging any dirty
	 * range but it still needs to be marked dirty and flagged as
	 * logged.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
	if (!(bip->bli_flags & XFS_BLI_ORDERED))
		xfs_buf_item_log(bip, first, last);
}

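/*
 * Usage sketch (illustrative; "struct xfs_foo", "off" and "newval" are
 * hypothetical): callers log only the byte range they dirtied, relative
 * to the start of the buffer:
 *
 *	struct xfs_foo	*foo = xfs_buf_offset(bp, off);
 *
 *	foo->counter = cpu_to_be32(newval);
 *	xfs_trans_log_buf(tp, bp,
 *			  off + offsetof(struct xfs_foo, counter),
 *			  off + offsetof(struct xfs_foo, counter) +
 *				sizeof(foo->counter) - 1);
 */
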
/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
	tp->t_flags |= XFS_TRANS_DIRTY;
}

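/*
 * Usage sketch (illustrative, not part of the original file): when a
 * transaction frees a block of metadata, it grabs the buffer and
 * invalidates it so the stale contents can never be written back:
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
 *			       XFS_FSB_TO_DADDR(mp, fsbno),
 *			       XFS_FSB_TO_BB(mp, 1), 0);
 *	xfs_trans_binval(tp, bp);
 */
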
/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can
 * be transferred to the buffer's log format structure so that we'll know
 * what to do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any replay
 * of the inodes in the buffer needs to be prevented, as the
 * buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = xfs_buf_iodone;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

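/*
 * Usage sketch (illustrative; "blkno" and "blks_per_cluster" are
 * hypothetical): when an inode cluster is freed, this call pairs with
 * xfs_trans_binval() on the cluster buffer:
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
 *			       mp->m_bsize * blks_per_cluster,
 *			       XBF_UNMAPPED);
 *	xfs_trans_stale_inode_buf(tp, bp);
 *	xfs_trans_binval(tp, bp);
 */
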
/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as ordered for this transaction.  This means
 * that the contents of the buffer are not recorded in the transaction
 * but it is tracked in the AIL as though it was.  This allows us
 * to record logical changes in transactions rather than the physical
 * changes we make to the buffer without changing writeback ordering
 * constraints of metadata buffers.
 */
void
xfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_ORDERED;
	trace_xfs_buf_item_ordered(bip);
}

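/*
 * Usage sketch (illustrative, not part of the original file): an
 * ordered buffer is still passed to xfs_trans_log_buf() to dirty the
 * item and the transaction, but the byte range is ignored (see
 * xfs_trans_log_buf() above):
 *
 *	xfs_trans_ordered_buf(tp, bp);
 *	xfs_trans_log_buf(tp, bp, 0, 0);
 */
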
/*
 * Set the type of the buffer for log recovery so that it can correctly
 * identify and hence attach the correct buffer ops to the buffer after
 * replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	enum xfs_blft		type)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!tp)
		return;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	xfs_blft_to_flags(&bip->__bli_format, type);
}

void
xfs_trans_buf_copy_type(
	struct xfs_buf		*dst_bp,
	struct xfs_buf		*src_bp)
{
	struct xfs_buf_log_item	*sbip = src_bp->b_fspriv;
	struct xfs_buf_log_item	*dbip = dst_bp->b_fspriv;
	enum xfs_blft		type;

	type = xfs_blft_from_flags(&sbip->__bli_format);
	xfs_blft_to_flags(&dbip->__bli_format, type);
}

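/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that (re)initializes a buffer to a new on-disk format stamps its type
 * so that log recovery can attach the matching verifier, e.g.:
 *
 *	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
 */
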
/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots.  However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety.  (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened.  We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	uint		type)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	bip->__bli_format.blf_flags |= type;

	switch (type) {
	case XFS_BLF_UDQUOT_BUF:
		type = XFS_BLFT_UDQUOT_BUF;
		break;
	case XFS_BLF_PDQUOT_BUF:
		type = XFS_BLFT_PDQUOT_BUF;
		break;
	case XFS_BLF_GDQUOT_BUF:
		type = XFS_BLFT_GDQUOT_BUF;
		break;
	default:
		type = XFS_BLFT_UNKNOWN_BUF;
		break;
	}

	xfs_trans_buf_set_type(tp, bp, type);
}
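
/*
 * Usage sketch (illustrative, not part of the original file): the quota
 * code marks a freshly initialized buffer of user dquots with:
 *
 *	xfs_trans_dquot_buf(tp, bp, XFS_BLF_UDQUOT_BUF);
 */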