/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project;
 * otherwise lock the lower id first, see xfs_dqlock2.
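 *
 * A minimal caller sketch (hypothetical: udq and gdq are referenced user
 * and group dquots already held by the caller):
 *
 *	xfs_dqlock2(udq, gdq);	(takes both q_qlocks in the safe order)
 *	... modify both dquots ...
 *	xfs_dqunlock(udq);	(unlock order does not matter)
 *	xfs_dqunlock(gdq);
 */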
#ifdef DEBUG
xfs_buftarg_t	*xfs_dqerror_target;
int		xfs_do_dqerror;
int		xfs_dqreq_num;
int		xfs_dqerror_mod = 33;
#endif

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot.
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	int			prealloc = 0;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
		prealloc = 1;
	}
	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
		prealloc = 1;
	}
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off.)
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement is off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded. They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t		*d;
	int			curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - ids are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

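/*
 * Worked example for the first-id computation above (illustrative
 * numbers only, not from any real filesystem): with qi_dqperchunk = 30
 * and id = 100, curid = 100 - (100 % 30) = 90, so this buffer is
 * stamped with dquot ids 90..119, one per xfs_dqblk.
 */
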
/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft
 * limit is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}

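/*
 * Worked example (illustrative numbers): with d_blk_hardlimit = 1000
 * blocks and no soft limit, the above yields q_prealloc_hi_wmark = 1000,
 * q_prealloc_lo_wmark = 1000 / 100 * 95 = 950, and q_low_space[] entries
 * of 10, 30 and 50 blocks for the 1%, 3% and 5% thresholds respectively.
 */
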
/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t	flist;
	xfs_bmbt_irec_t	map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quota is turned off while we didn't
	 * have the inode lock.
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later.
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);
	if (!bp) {
		error = -ENOMEM;
		goto error1;
	}
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return error;
}

STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. Make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);
	if (error) {
		ASSERT(*bpp == NULL);
		return error;
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_dqcheck(mp, ddq, firstid + i,
				    dqp->dq_flags & XFS_DQ_ALLTYPES,
				    XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return -EIO;
		}
	}

	return 0;
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
	uint			lock_mode;

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
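
	/*
	 * Illustrative mapping (hypothetical numbers): with
	 * qi_dqperchunk = 30, id 100 maps to file offset 100 / 30 = 3
	 * in the quota inode, i.e. the chunk holding ids 90..119.
	 */
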
	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quota is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet.
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to.
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return -ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
				       dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * Store the blkno etc so that we don't have to do the
		 * mapping all the time.
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
					mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return error;
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return 0;
}

/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);
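
	/*
	 * Sketch of how the counting completion acts as the flush "lock"
	 * (hypothetical sequence; xfs_dqflock()/xfs_dqfunlock() wrap the
	 * completion):
	 *
	 *	xfs_dqflock(dqp);	waits for, then consumes, the count
	 *	... flush the dquot to its buffer ...
	 *	xfs_dqfunlock(dqp);	re-completes for the next flusher
	 *
	 * Only one thread at a time can hold the flush lock this way.
	 */
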
	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * Get a pointer to the on-disk dquot and the buffer containing it;
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * not allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
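
	/*
	 * Example of the convention (illustrative numbers): with 100
	 * blocks already accounted in d_bcount, q_res_bcount starts at
	 * 100; a later transaction reserving 20 blocks raises it to 120,
	 * so limit checks compare one counter instead of computing a sum.
	 */
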
	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with xfs_trans_read_buf() (in dqtobp()), so
	 * we need to release it with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
 * a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
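 *
 * Hypothetical caller sketch (error handling trimmed; ip, if supplied,
 * must already be held ILOCK_EXCL):
 *
 *	error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
 *			     XFS_QMOPT_DQALLOC, &dqp);
 *	if (!error) {
 *		... use the locked, referenced dqp ...
 *		xfs_qm_dqput(dqp);	(unlocks and drops the reference)
 *	}
 */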
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((!XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (!XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (!XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return -ESRCH;
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return -EIO;
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return -ESRCH;
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken.
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked, and the caller must also hold the flush lock.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
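 *
 * Typical caller shape (sketch only; error handling trimmed, and
 * buffer_list stands in for the caller's delwri list):
 *
 *	xfs_dqlock(dqp);
 *	if (xfs_dqflock_nowait(dqp)) {
 *		error = xfs_qm_dqflush(dqp, &bp);
 *		if (!error) {
 *			xfs_buf_delwri_queue(bp, buffer_list);
 *			xfs_buf_relse(bp);
 *		}
 *	}
 *	xfs_dqunlock(dqp);
 */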
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		spin_lock(&mp->m_ail->xa_lock);
		if (lip->li_flags & XFS_LI_IN_AIL)
			xfs_trans_ail_delete(mp->m_ail, lip,
					     SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&mp->m_ail->xa_lock);
		error = -EIO;
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			    XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EIO;
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return -EIO;
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}

int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void __exit
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}