fs/xfs/xfs_vnodeops.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_rtalloc.h"
#include "xfs_trans_space.h"
#include "xfs_log_priv.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

int
xfs_setattr(
        struct xfs_inode        *ip,
        struct iattr            *iattr,
        int                     flags)
{
        xfs_mount_t             *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        int                     mask = iattr->ia_valid;
        xfs_trans_t             *tp;
        int                     code;
        uint                    lock_flags;
        uint                    commit_flags = 0;
        uid_t                   uid = 0, iuid = 0;
        gid_t                   gid = 0, igid = 0;
        struct xfs_dquot        *udqp, *gdqp, *olddquot1, *olddquot2;
        int                     need_iolock = 1;

        xfs_itrace_entry(ip);

        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return XFS_ERROR(EROFS);

        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);

        code = -inode_change_ok(inode, iattr);
        if (code)
                return code;

        olddquot1 = olddquot2 = NULL;
        udqp = gdqp = NULL;

        /*
         * If disk quotas are on, we make sure that the dquots exist on disk
         * before we start any other transactions.  Trying to do this later
         * is messy.  We don't bother taking a read lock to look at the IDs
         * in the inode here, because we can't hold it across the
         * trans_reserve.  If the IDs do change before we take the ilock,
         * we're covered because the i_*dquot fields will get updated anyway.
         */
        if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
                uint    qflags = 0;

                if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
                        uid = iattr->ia_uid;
                        qflags |= XFS_QMOPT_UQUOTA;
                } else {
                        uid = ip->i_d.di_uid;
                }
                if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
                        gid = iattr->ia_gid;
                        qflags |= XFS_QMOPT_GQUOTA;
                } else {
                        gid = ip->i_d.di_gid;
                }

                /*
                 * We take a reference when we initialize udqp and gdqp,
                 * so it is important that we never blindly double trip on
                 * the same variable. See xfs_create() for an example.
                 */
                ASSERT(udqp == NULL);
                ASSERT(gdqp == NULL);
                code = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
                                         qflags, &udqp, &gdqp);
                if (code)
                        return code;
        }

        /*
         * For the other attributes, we acquire the inode lock and
         * first do an error checking pass.
         */
        tp = NULL;
        lock_flags = XFS_ILOCK_EXCL;
        if (flags & XFS_ATTR_NOLOCK)
                need_iolock = 0;
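        /*
         * For anything other than a size change we can reserve the
         * transaction up front.  A size change has to flush and wait for
         * outstanding I/O first, so its transaction is allocated further
         * down, after that work has been done.
         */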
        if (!(mask & ATTR_SIZE)) {
                tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
                commit_flags = 0;
                code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
                                         0, 0, 0);
                if (code) {
                        lock_flags = 0;
                        goto error_return;
                }
        } else {
                if (need_iolock)
                        lock_flags |= XFS_IOLOCK_EXCL;
        }

        xfs_ilock(ip, lock_flags);

        /*
         * Change file ownership.  Must be the owner or privileged.
         */
        if (mask & (ATTR_UID|ATTR_GID)) {
                /*
                 * These IDs could have changed since we last looked at them.
                 * But, we're assured that if the ownership did change
                 * while we didn't have the inode locked, inode's dquot(s)
                 * would have changed also.
                 */
                iuid = ip->i_d.di_uid;
                igid = ip->i_d.di_gid;
                gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
                uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;

                /*
                 * Do a quota reservation only if uid/gid is actually
                 * going to change.
                 */
                if (XFS_IS_QUOTA_RUNNING(mp) &&
                    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
                     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
                        ASSERT(tp);
                        code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
                                                capable(CAP_FOWNER) ?
                                                XFS_QMOPT_FORCE_RES : 0);
                        if (code)       /* out of quota */
                                goto error_return;
                }
        }

        /*
         * Truncate file.  Must have write permission and not be a directory.
         */
        if (mask & ATTR_SIZE) {
                /* Short circuit the truncate case for zero length files */
                if (iattr->ia_size == 0 &&
                    ip->i_size == 0 && ip->i_d.di_nextents == 0) {
                        xfs_iunlock(ip, XFS_ILOCK_EXCL);
                        lock_flags &= ~XFS_ILOCK_EXCL;
                        if (mask & ATTR_CTIME)
                                xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
                        code = 0;
                        goto error_return;
                }

                if (S_ISDIR(ip->i_d.di_mode)) {
                        code = XFS_ERROR(EISDIR);
                        goto error_return;
                } else if (!S_ISREG(ip->i_d.di_mode)) {
                        code = XFS_ERROR(EINVAL);
                        goto error_return;
                }

                /*
                 * Make sure that the dquots are attached to the inode.
                 */
                code = xfs_qm_dqattach_locked(ip, 0);
                if (code)
                        goto error_return;

                /*
                 * Now we can make the changes.  Before we join the inode
                 * to the transaction, if ATTR_SIZE is set then take care of
                 * the part of the truncation that must be done without the
                 * inode lock.  This needs to be done before joining the inode
                 * to the transaction, because the inode cannot be unlocked
                 * once it is a part of the transaction.
                 */
                if (iattr->ia_size > ip->i_size) {
                        /*
                         * Do the first part of growing a file: zero any data
                         * in the last block that is beyond the old EOF.  We
                         * need to do this before the inode is joined to the
                         * transaction to modify the i_size.
                         */
                        code = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
                }
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

                /*
                 * We are going to log the inode size change in this
                 * transaction, so any previous writes between the on-disk
                 * EOF and the new EOF that have not been written out need
                 * to be written here.  If we do not write the data out, we
                 * expose ourselves to the null files problem.
                 *
                 * Only flush from the on-disk size to the smaller of the
                 * in-memory file size or the new size, as that's the range
                 * we really care about here; this avoids waiting for other
                 * data outside that range.
                 */
                if (!code &&
                    ip->i_size != ip->i_d.di_size &&
                    iattr->ia_size > ip->i_d.di_size) {
                        code = xfs_flush_pages(ip,
                                        ip->i_d.di_size, iattr->ia_size,
                                        XBF_ASYNC, FI_NONE);
                }

                /* wait for all I/O to complete */
                xfs_ioend_wait(ip);

                if (!code)
                        code = xfs_itruncate_data(ip, iattr->ia_size);
                if (code) {
                        ASSERT(tp == NULL);
                        lock_flags &= ~XFS_ILOCK_EXCL;
                        ASSERT(lock_flags == XFS_IOLOCK_EXCL || !need_iolock);
                        goto error_return;
                }
                tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
                if ((code = xfs_trans_reserve(tp, 0,
                                             XFS_ITRUNCATE_LOG_RES(mp), 0,
                                             XFS_TRANS_PERM_LOG_RES,
                                             XFS_ITRUNCATE_LOG_COUNT))) {
                        xfs_trans_cancel(tp, 0);
                        if (need_iolock)
                                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return code;
                }
                commit_flags = XFS_TRANS_RELEASE_LOG_RES;
                xfs_ilock(ip, XFS_ILOCK_EXCL);

                xfs_trans_ijoin(tp, ip, lock_flags);
                xfs_trans_ihold(tp, ip);

                /*
                 * Only change the c/mtime if we are changing the size
                 * or we are explicitly asked to change it. This handles
                 * the semantic difference between truncate() and ftruncate()
                 * as implemented in the VFS.
                 *
                 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME
                 * is a special case where we need to update the times despite
                 * not having these flags set.  For all other operations the
                 * VFS sets these flags explicitly if it wants a timestamp
                 * update.
                 */
                if (iattr->ia_size != ip->i_size &&
                    (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
                        iattr->ia_ctime = iattr->ia_mtime =
                                current_fs_time(inode->i_sb);
                        mask |= ATTR_CTIME | ATTR_MTIME;
                }

                if (iattr->ia_size > ip->i_size) {
                        ip->i_d.di_size = iattr->ia_size;
                        ip->i_size = iattr->ia_size;
                        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                } else if (iattr->ia_size <= ip->i_size ||
                           (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
                        /*
                         * signal a sync transaction unless
                         * we're truncating an already unlinked
                         * file on a wsync filesystem
                         */
                        code = xfs_itruncate_finish(&tp, ip, iattr->ia_size,
                                            XFS_DATA_FORK,
                                            ((ip->i_d.di_nlink != 0 ||
                                              !(mp->m_flags & XFS_MOUNT_WSYNC))
                                             ? 1 : 0));
                        if (code)
                                goto abort_return;
                        /*
                         * Truncated "down", so we're removing references
                         * to old data here - if we now delay flushing for
                         * a long time, we expose ourselves unduly to the
                         * notorious NULL files problem.  So, we mark this
                         * vnode and flush it when the file is closed, and
                         * do not wait the usual (long) time for writeout.
                         */
                        xfs_iflags_set(ip, XFS_ITRUNCATED);
                }
        } else if (tp) {
                xfs_trans_ijoin(tp, ip, lock_flags);
                xfs_trans_ihold(tp, ip);
        }

        /*
         * Change file ownership.  Must be the owner or privileged.
         */
        if (mask & (ATTR_UID|ATTR_GID)) {
                /*
                 * CAP_FSETID overrides the following restrictions:
                 *
                 * The set-user-ID and set-group-ID bits of a file will be
                 * cleared upon successful return from chown()
                 */
                if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
                    !capable(CAP_FSETID)) {
                        ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
                }

                /*
                 * Change the ownerships and register quota modifications
                 * in the transaction.
                 */
                if (iuid != uid) {
                        if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
                                ASSERT(mask & ATTR_UID);
                                ASSERT(udqp);
                                olddquot1 = xfs_qm_vop_chown(tp, ip,
                                                        &ip->i_udquot, udqp);
                        }
                        ip->i_d.di_uid = uid;
                        inode->i_uid = uid;
                }
                if (igid != gid) {
                        if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
                                ASSERT(!XFS_IS_PQUOTA_ON(mp));
                                ASSERT(mask & ATTR_GID);
                                ASSERT(gdqp);
                                olddquot2 = xfs_qm_vop_chown(tp, ip,
                                                        &ip->i_gdquot, gdqp);
                        }
                        ip->i_d.di_gid = gid;
                        inode->i_gid = gid;
                }
        }

        /*
         * Change file access modes.
         */
        if (mask & ATTR_MODE) {
                umode_t mode = iattr->ia_mode;

                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;

                ip->i_d.di_mode &= S_IFMT;
                ip->i_d.di_mode |= mode & ~S_IFMT;

                inode->i_mode &= S_IFMT;
                inode->i_mode |= mode & ~S_IFMT;
        }

        /*
         * Change file access or modified times.
         */
        if (mask & ATTR_ATIME) {
                inode->i_atime = iattr->ia_atime;
                ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
                ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
                ip->i_update_core = 1;
        }
        if (mask & ATTR_CTIME) {
                inode->i_ctime = iattr->ia_ctime;
                ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
                ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
                ip->i_update_core = 1;
        }
        if (mask & ATTR_MTIME) {
                inode->i_mtime = iattr->ia_mtime;
                ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
                ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
                ip->i_update_core = 1;
        }

        /*
         * And finally, log the inode core if any attribute in it
         * has been changed.
         */
        if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
                    ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        XFS_STATS_INC(xs_ig_attrchg);

        /*
         * If this is a synchronous mount, make sure that the
         * transaction goes to disk before returning to the user.
         * This is slightly sub-optimal in that truncates require
         * two sync transactions instead of one for wsync filesystems.
         * One for the truncate and one for the timestamps since we
         * don't want to change the timestamps unless we're sure the
         * truncate worked.  Truncates are less than 1% of the laddis
         * mix so this probably isn't worth the trouble to optimize.
         */
        code = 0;
        if (mp->m_flags & XFS_MOUNT_WSYNC)
                xfs_trans_set_sync(tp);

        code = xfs_trans_commit(tp, commit_flags);

        xfs_iunlock(ip, lock_flags);

        /*
         * Release any dquot(s) the inode had kept before chown.
         */
        xfs_qm_dqrele(olddquot1);
        xfs_qm_dqrele(olddquot2);
        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);

        if (code)
                return code;

        /*
         * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
         *           update.  We could avoid this with linked transactions
         *           and passing down the transaction pointer all the way
         *           to attr_set.  No previous user of the generic
         *           Posix ACL code seems to care about this issue either.
         */
        if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
                code = -xfs_acl_chmod(inode);
                if (code)
                        return XFS_ERROR(code);
        }

        return 0;

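        /*
         * Error paths: abort_return is used once the transaction has been
         * dirtied and therefore must be aborted as well as cancelled;
         * error_return handles the cases where a plain cancel is enough.
         * Both drop any dquot references and locks still held.
         */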
 abort_return:
        commit_flags |= XFS_TRANS_ABORT;
 error_return:
        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        if (tp) {
                xfs_trans_cancel(tp, commit_flags);
        }
        if (lock_flags != 0) {
                xfs_iunlock(ip, lock_flags);
        }
        return code;
}

/*
 * The maximum pathlen is 1024 bytes. Since the minimum file system
 * blocksize is 512 bytes, we can get a max of 2 extents back from
 * bmapi.
 */
#define SYMLINK_MAPS 2

STATIC int
xfs_readlink_bmap(
        xfs_inode_t     *ip,
        char            *link)
{
        xfs_mount_t     *mp = ip->i_mount;
        int             pathlen = ip->i_d.di_size;
        int             nmaps = SYMLINK_MAPS;
        xfs_bmbt_irec_t mval[SYMLINK_MAPS];
        xfs_daddr_t     d;
        int             byte_cnt;
        int             n;
        xfs_buf_t       *bp;
        int             error = 0;

        error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0,
                        mval, &nmaps, NULL, NULL);
        if (error)
                goto out;

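        /*
         * Walk each returned mapping: read the underlying blocks and copy
         * the symlink target into the caller's buffer until the whole
         * path has been copied.
         */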
        for (n = 0; n < nmaps; n++) {
                d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
                byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);

                bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
                                  XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK);
                error = XFS_BUF_GETERROR(bp);
                if (error) {
                        xfs_ioerror_alert("xfs_readlink",
                                  ip->i_mount, bp, XFS_BUF_ADDR(bp));
                        xfs_buf_relse(bp);
                        goto out;
                }
                if (pathlen < byte_cnt)
                        byte_cnt = pathlen;
                pathlen -= byte_cnt;

                memcpy(link, XFS_BUF_PTR(bp), byte_cnt);
                xfs_buf_relse(bp);
        }

        link[ip->i_d.di_size] = '\0';
        error = 0;

 out:
        return error;
}

int
xfs_readlink(
        xfs_inode_t     *ip,
        char            *link)
{
        xfs_mount_t     *mp = ip->i_mount;
        int             pathlen;
        int             error = 0;

        xfs_itrace_entry(ip);

        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);

        xfs_ilock(ip, XFS_ILOCK_SHARED);

        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
        ASSERT(ip->i_d.di_size <= MAXPATHLEN);

        pathlen = ip->i_d.di_size;
        if (!pathlen)
                goto out;

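        /*
         * A short symlink target is stored inline in the inode's data
         * fork; anything larger has to be read back from the mapped
         * blocks.
         */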
        if (ip->i_df.if_flags & XFS_IFINLINE) {
                memcpy(link, ip->i_df.if_u1.if_data, pathlen);
                link[pathlen] = '\0';
        } else {
                error = xfs_readlink_bmap(ip, link);
        }

 out:
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        return error;
}

/*
 * Flags for xfs_free_eofblocks
 */
#define XFS_FREE_EOF_TRYLOCK    (1<<0)

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
STATIC int
xfs_free_eofblocks(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,
        int             flags)
{
        xfs_trans_t     *tp;
        int             error;
        xfs_fileoff_t   end_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_filblks_t   map_len;
        int             nimaps;
        xfs_bmbt_irec_t imap;

        /*
         * Figure out if there are any blocks beyond the end
         * of the file.  If not, then there is nothing to do.
         */
        end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size));
        last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
        map_len = last_fsb - end_fsb;
        if (map_len <= 0)
                return 0;

        nimaps = 1;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0,
                          NULL, 0, &imap, &nimaps, NULL, NULL);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

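        /*
         * Only bother with a truncating transaction if the mapping shows
         * real or delayed allocations beyond the current EOF.
         */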
        if (!error && (nimaps != 0) &&
            (imap.br_startblock != HOLESTARTBLOCK ||
             ip->i_delayed_blks)) {
                /*
                 * Attach the dquots to the inode up front.
                 */
                error = xfs_qm_dqattach(ip, 0);
                if (error)
                        return error;

                /*
                 * There are blocks after the end of file.
                 * Free them up now by truncating the file to
                 * its current size.
                 */
                tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

                /*
                 * Do the xfs_itruncate_start() call before reserving any
                 * log space because itruncate_start will call into the
                 * buffer cache and we can't do that within a transaction.
                 */
                if (flags & XFS_FREE_EOF_TRYLOCK) {
                        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
                                xfs_trans_cancel(tp, 0);
                                return 0;
                        }
                } else {
                        xfs_ilock(ip, XFS_IOLOCK_EXCL);
                }
                error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
                                    ip->i_size);
                if (error) {
                        xfs_trans_cancel(tp, 0);
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return error;
                }

                error = xfs_trans_reserve(tp, 0,
                                          XFS_ITRUNCATE_LOG_RES(mp),
                                          0, XFS_TRANS_PERM_LOG_RES,
                                          XFS_ITRUNCATE_LOG_COUNT);
                if (error) {
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        xfs_trans_cancel(tp, 0);
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return error;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip,
                                XFS_IOLOCK_EXCL |
                                XFS_ILOCK_EXCL);
                xfs_trans_ihold(tp, ip);

                error = xfs_itruncate_finish(&tp, ip,
                                             ip->i_size,
                                             XFS_DATA_FORK,
                                             0);
                /*
                 * If we get an error at this point we
                 * simply don't bother truncating the file.
                 */
                if (error) {
                        xfs_trans_cancel(tp,
                                         (XFS_TRANS_RELEASE_LOG_RES |
                                          XFS_TRANS_ABORT));
                } else {
                        error = xfs_trans_commit(tp,
                                                XFS_TRANS_RELEASE_LOG_RES);
                }
                xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
        }
        return error;
}

/*
 * Free a symlink that has blocks associated with it.
 */
STATIC int
xfs_inactive_symlink_rmt(
        xfs_inode_t     *ip,
        xfs_trans_t     **tpp)
{
        xfs_buf_t       *bp;
        int             committed;
        int             done;
        int             error;
        xfs_fsblock_t   first_block;
        xfs_bmap_free_t free_list;
        int             i;
        xfs_mount_t     *mp;
        xfs_bmbt_irec_t mval[SYMLINK_MAPS];
        int             nmaps;
        xfs_trans_t     *ntp;
        int             size;
        xfs_trans_t     *tp;

        tp = *tpp;
        mp = ip->i_mount;
        ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip));
        /*
         * We're freeing a symlink that has some blocks allocated to it.
         * Free the blocks here.  We know that we've got either 1 or 2
         * extents and that we can free them all in one bunmapi call.
         */
        ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
        if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
                        XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
                ASSERT(XFS_FORCED_SHUTDOWN(mp));
                xfs_trans_cancel(tp, 0);
                *tpp = NULL;
                return error;
        }
        /*
         * Lock the inode, fix the size, and join it to the transaction.
         * Hold it so in the normal path, we still have it locked for
         * the second transaction.  In the error paths we need it
         * held so the cancel won't rele it, see below.
         */
        xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
        size = (int)ip->i_d.di_size;
        ip->i_d.di_size = 0;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        xfs_trans_ihold(tp, ip);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        /*
         * Find the block(s) so we can inval and unmap them.
         */
        done = 0;
        xfs_bmap_init(&free_list, &first_block);
        nmaps = ARRAY_SIZE(mval);
        if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
                        XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
                        &free_list, NULL)))
                goto error0;
        /*
         * Invalidate the block(s).
         */
        for (i = 0; i < nmaps; i++) {
                bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
                        XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
                        XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
                xfs_trans_binval(tp, bp);
        }
        /*
         * Unmap the dead block(s) to the free_list.
         */
        if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
                        &first_block, &free_list, NULL, &done)))
                goto error1;
        ASSERT(done);
        /*
         * Commit the first transaction.  This logs the EFI and the inode.
         */
        if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
                goto error1;
        /*
         * The transaction must have been committed, since there were
         * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
         * The new tp has the extent freeing and EFDs.
         */
        ASSERT(committed);
        /*
         * The first xact was committed, so add the inode to the new one.
         * Mark it dirty so it will be logged and moved forward in the log as
         * part of every commit.
         */
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        xfs_trans_ihold(tp, ip);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        /*
         * Get a new, empty transaction to return to our caller.
         */
        ntp = xfs_trans_dup(tp);
        /*
         * Commit the transaction containing extent freeing and EFDs.
         * If we get an error on the commit here or on the reserve below,
         * we need to unlock the inode since the new transaction doesn't
         * have the inode attached.
         */
        error = xfs_trans_commit(tp, 0);
        tp = ntp;
        if (error) {
                ASSERT(XFS_FORCED_SHUTDOWN(mp));
                goto error0;
        }
        /*
         * The transaction commit worked ok, so we can drop the extra ticket
         * reference that we gained in xfs_trans_dup().
         */
        xfs_log_ticket_put(tp->t_ticket);

        /*
         * Remove the memory for extent descriptions (just bookkeeping).
         */
        if (ip->i_df.if_bytes)
                xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
        ASSERT(ip->i_df.if_bytes == 0);
        /*
         * Put an itruncate log reservation in the new transaction
         * for our caller.
         */
        if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
                        XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
                ASSERT(XFS_FORCED_SHUTDOWN(mp));
                goto error0;
        }
        /*
         * Return with the inode locked but not joined to the transaction.
         */
        *tpp = tp;
        return 0;

 error1:
        xfs_bmap_cancel(&free_list);
 error0:
        /*
         * Have to come here with the inode locked and either
         * (held and in the transaction) or (not in the transaction).
         * If the inode isn't held then cancel would iput it, but
         * that's wrong since this is inactive and the vnode ref
         * count is 0 already.
         * Cancel won't do anything to the inode if held, but it still
         * needs to be locked until the cancel is done, if it was
         * joined to the transaction.
         */
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
        *tpp = NULL;
        return error;
}

STATIC int
xfs_inactive_symlink_local(
        xfs_inode_t     *ip,
        xfs_trans_t     **tpp)
{
        int             error;

        ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip));
        /*
         * We're freeing a symlink which fit into the inode.  Just free
         * the memory used to hold the old symlink.
         */
        error = xfs_trans_reserve(*tpp, 0,
                                  XFS_ITRUNCATE_LOG_RES(ip->i_mount),
                                  0, XFS_TRANS_PERM_LOG_RES,
                                  XFS_ITRUNCATE_LOG_COUNT);

        if (error) {
                xfs_trans_cancel(*tpp, 0);
                *tpp = NULL;
                return error;
        }
        xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

        /*
         * Zero length symlinks _can_ exist.
         */
        if (ip->i_df.if_bytes > 0) {
                xfs_idata_realloc(ip,
                                  -(ip->i_df.if_bytes),
                                  XFS_DATA_FORK);
                ASSERT(ip->i_df.if_bytes == 0);
        }
        return 0;
}

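/*
 * Tear down the attribute fork of an inode that is going inactive:
 * commit the caller's transaction, run xfs_attr_inactive() outside of
 * transaction context, then hand back a fresh transaction reserved for
 * freeing the inode.
 */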
STATIC int
xfs_inactive_attrs(
        xfs_inode_t     *ip,
        xfs_trans_t     **tpp)
{
        xfs_trans_t     *tp;
        int             error;
        xfs_mount_t     *mp;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        tp = *tpp;
        mp = ip->i_mount;
        ASSERT(ip->i_d.di_forkoff != 0);
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        if (error)
                goto error_unlock;

        error = xfs_attr_inactive(ip);
        if (error)
                goto error_unlock;

        tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
        error = xfs_trans_reserve(tp, 0,
                                  XFS_IFREE_LOG_RES(mp),
                                  0, XFS_TRANS_PERM_LOG_RES,
                                  XFS_INACTIVE_LOG_COUNT);
        if (error)
                goto error_cancel;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
        xfs_trans_ihold(tp, ip);
        xfs_idestroy_fork(ip, XFS_ATTR_FORK);

        ASSERT(ip->i_d.di_anextents == 0);

        *tpp = tp;
        return 0;

error_cancel:
        ASSERT(XFS_FORCED_SHUTDOWN(mp));
        xfs_trans_cancel(tp, 0);
error_unlock:
        *tpp = NULL;
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return error;
}

int
xfs_release(
        xfs_inode_t     *ip)
{
        xfs_mount_t     *mp = ip->i_mount;
        int             error;

        if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
                return 0;

        /* If this is a read-only mount, don't do this (would generate I/O) */
        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return 0;

        if (!XFS_FORCED_SHUTDOWN(mp)) {
                int truncated;

                /*
                 * If we are using filestreams, and we have an unlinked
                 * file that we are processing the last close on, then nothing
                 * will be able to reopen and write to this file. Purge this
                 * inode from the filestreams cache so that it doesn't delay
                 * teardown of the inode.
                 */
                if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
                        xfs_filestream_deassociate(ip);

                /*
                 * If we previously truncated this file and removed old data
                 * in the process, we want to initiate "early" writeout on
                 * the last close.  This is an attempt to combat the notorious
                 * NULL files problem which is particularly noticeable from a
                 * truncate down, buffered (re-)write (delalloc), followed by
                 * a crash.  What we are effectively doing here is
                 * significantly reducing the time window where we'd otherwise
                 * be exposed to that problem.
                 */
                truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
                if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
                        xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
        }

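        /*
         * For a still-linked regular file with cached data or delayed
         * allocations, opportunistically free any blocks beyond EOF on
         * this close so they are not pinned until the inode is reclaimed.
         * Files flagged PREALLOC or APPEND keep their blocks.
         */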
        if (ip->i_d.di_nlink != 0) {
                if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
                     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
                       ip->i_delayed_blks > 0)) &&
                     (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
                    (!(ip->i_d.di_flags &
                                (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {

                        /*
                         * If we can't get the iolock just skip truncating
                         * the blocks past EOF because we could deadlock
                         * with the mmap_sem otherwise.  We'll get another
                         * chance to drop them once the last reference to
                         * the inode is dropped, so we'll never leak blocks
                         * permanently.
                         */
                        error = xfs_free_eofblocks(mp, ip,
                                                   XFS_FREE_EOF_TRYLOCK);
                        if (error)
                                return error;
                }
        }

        return 0;
}

/*
 * xfs_inactive
 *
 * This is called when the vnode reference count goes to zero.  If the
 * file has been unlinked, then it must now be truncated.  Also, we
 * clear all of the read-ahead state kept for the inode here since the
 * file is now closed.
 */
int
xfs_inactive(
        xfs_inode_t     *ip)
{
        xfs_bmap_free_t free_list;
        xfs_fsblock_t   first_block;
        int             committed;
        xfs_trans_t     *tp;
        xfs_mount_t     *mp;
        int             error;
        int             truncate;

        xfs_itrace_entry(ip);

        /*
         * If the inode is already free, then there can be nothing
         * to clean up here.
         */
        if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
                ASSERT(ip->i_df.if_real_bytes == 0);
                ASSERT(ip->i_df.if_broot_bytes == 0);
                return VN_INACTIVE_CACHE;
        }

        /*
         * Only do a truncate if it's a regular file with
         * some actual space in it.  It's OK to look at the
         * inode's fields without the lock because we're the
         * only one with a reference to the inode.
         */
        truncate = ((ip->i_d.di_nlink == 0) &&
            ((ip->i_d.di_size != 0) || (ip->i_size != 0) ||
             (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
            ((ip->i_d.di_mode & S_IFMT) == S_IFREG));

        mp = ip->i_mount;

        error = 0;

        /* If this is a read-only mount, don't do this (would generate I/O) */
        if (mp->m_flags & XFS_MOUNT_RDONLY)
                goto out;

        if (ip->i_d.di_nlink != 0) {
                if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
                     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
                       ip->i_delayed_blks > 0)) &&
                     (ip->i_df.if_flags & XFS_IFEXTENTS) &&
                     (!(ip->i_d.di_flags &
                                (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
                      (ip->i_delayed_blks != 0)))) {
                        error = xfs_free_eofblocks(mp, ip, 0);
                        if (error)
                                return VN_INACTIVE_CACHE;
                }
                goto out;
        }

        ASSERT(ip->i_d.di_nlink == 0);

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return VN_INACTIVE_CACHE;

        tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
        if (truncate) {
                /*
                 * Do the xfs_itruncate_start() call before
                 * reserving any log space because itruncate_start
                 * will call into the buffer cache and we can't
                 * do that within a transaction.
                 */
                xfs_ilock(ip, XFS_IOLOCK_EXCL);

                error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
                if (error) {
                        xfs_trans_cancel(tp, 0);
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return VN_INACTIVE_CACHE;
                }

                error = xfs_trans_reserve(tp, 0,
                                          XFS_ITRUNCATE_LOG_RES(mp),
                                          0, XFS_TRANS_PERM_LOG_RES,
                                          XFS_ITRUNCATE_LOG_COUNT);
                if (error) {
                        /* Don't call itruncate_cleanup */
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        xfs_trans_cancel(tp, 0);
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return VN_INACTIVE_CACHE;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
                xfs_trans_ihold(tp, ip);

                /*
                 * Normally we have to run xfs_itruncate_finish() sync.
                 * But if the filesystem is wsync and we're in the inactive
                 * path, then we know that nlink == 0, and that the
                 * transaction that made nlink == 0 is permanently committed
                 * since xfs_remove runs as a synchronous transaction.
                 */
                error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK,
                                (!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0));

                if (error) {
                        xfs_trans_cancel(tp,
                                XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
                        return VN_INACTIVE_CACHE;
                }
        } else if ((ip->i_d.di_mode & S_IFMT) == S_IFLNK) {

                /*
                 * If we get an error while cleaning up a
                 * symlink we bail out.
                 */
                error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ?
                        xfs_inactive_symlink_rmt(ip, &tp) :
                        xfs_inactive_symlink_local(ip, &tp);

                if (error) {
                        ASSERT(tp == NULL);
                        return VN_INACTIVE_CACHE;
                }

                xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
                xfs_trans_ihold(tp, ip);
        } else {
                error = xfs_trans_reserve(tp, 0,
                                          XFS_IFREE_LOG_RES(mp),
                                          0, XFS_TRANS_PERM_LOG_RES,
                                          XFS_INACTIVE_LOG_COUNT);
                if (error) {
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        xfs_trans_cancel(tp, 0);
                        return VN_INACTIVE_CACHE;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
                xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
                xfs_trans_ihold(tp, ip);
        }

        /*
         * If there are attributes associated with the file
         * then blow them away now.  The code calls a routine
         * that recursively deconstructs the attribute fork.
         * We need to just commit the current transaction
         * because we can't use it for xfs_attr_inactive().
         */
        if (ip->i_d.di_anextents > 0) {
                error = xfs_inactive_attrs(ip, &tp);
                /*
                 * If we got an error, the transaction is already
                 * cancelled, and the inode is unlocked. Just get out.
                 */
                if (error)
                        return VN_INACTIVE_CACHE;
        } else if (ip->i_afp) {
                xfs_idestroy_fork(ip, XFS_ATTR_FORK);
        }

        /*
         * Free the inode.
         */
        xfs_bmap_init(&free_list, &first_block);
        error = xfs_ifree(tp, ip, &free_list);
        if (error) {
                /*
                 * If we fail to free the inode, shut down.  The cancel
                 * might do that; we need to make sure.  Otherwise the
                 * inode might be lost for a long time or forever.
                 */
                if (!XFS_FORCED_SHUTDOWN(mp)) {
                        cmn_err(CE_NOTE,
                "xfs_inactive:  xfs_ifree() returned an error = %d on %s",
                                error, mp->m_fsname);
                        xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
                }
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
        } else {
                /*
                 * Credit the quota account(s). The inode is gone.
                 */
                xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

                /*
                 * Just ignore errors at this point.  There is nothing we can
                 * do except to try to keep going. Make sure it's not a silent
                 * error.
                 */
                error = xfs_bmap_finish(&tp, &free_list, &committed);
                if (error)
                        xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
                                "xfs_bmap_finish() returned error %d", error);
                error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
                if (error)
                        xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
                                "xfs_trans_commit() returned error %d", error);
        }

        /*
         * Release the dquots held by inode, if any.
         */
        xfs_qm_dqdetach(ip);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

 out:
        return VN_INACTIVE_CACHE;
}

/*
 * Looks up an inode from "name".  If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match.  If a CI match is
 * found, ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
        xfs_inode_t             *dp,
        struct xfs_name         *name,
        xfs_inode_t             **ipp,
        struct xfs_name         *ci_name)
{
        xfs_ino_t               inum;
        int                     error;
        uint                    lock_mode;

        xfs_itrace_entry(dp);

        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return XFS_ERROR(EIO);

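        /*
         * Do the directory lookup under a shared ilock, then grab the
         * inode we found by number.
         */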
        lock_mode = xfs_ilock_map_shared(dp);
        error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
        xfs_iunlock_map_shared(dp, lock_mode);

        if (error)
                goto out;

        error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
        if (error)
                goto out_free_name;

        return 0;

out_free_name:
        if (ci_name)
                kmem_free(ci_name->name);
out:
        *ipp = NULL;
        return error;
}

int
xfs_create(
        xfs_inode_t             *dp,
        struct xfs_name         *name,
        mode_t                  mode,
        xfs_dev_t               rdev,
        xfs_inode_t             **ipp,
        cred_t                  *credp)
{
        int                     is_dir = S_ISDIR(mode);
        struct xfs_mount        *mp = dp->i_mount;
        struct xfs_inode        *ip = NULL;
        struct xfs_trans        *tp = NULL;
        int                     error;
        xfs_bmap_free_t         free_list;
        xfs_fsblock_t           first_block;
        boolean_t               unlock_dp_on_error = B_FALSE;
        uint                    cancel_flags;
        int                     committed;
        xfs_prid_t              prid;
        struct xfs_dquot        *udqp = NULL;
        struct xfs_dquot        *gdqp = NULL;
        uint                    resblks;
        uint                    log_res;
        uint                    log_count;

        xfs_itrace_entry(dp);

        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);

        if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
                prid = dp->i_d.di_projid;
        else
                prid = dfltprid;

        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
        error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
                        XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
        if (error)
                goto std_return;

        if (is_dir) {
                rdev = 0;
                resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
                log_res = XFS_MKDIR_LOG_RES(mp);
                log_count = XFS_MKDIR_LOG_COUNT;
                tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
        } else {
                resblks = XFS_CREATE_SPACE_RES(mp, name->len);
                log_res = XFS_CREATE_LOG_RES(mp);
                log_count = XFS_CREATE_LOG_COUNT;
                tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
        }

        cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

        /*
         * Initially assume that the file does not exist and
         * reserve the resources for that case.  If that is not
         * the case we'll drop the one we have and get a more
         * appropriate transaction later.
         */
        error = xfs_trans_reserve(tp, resblks, log_res, 0,
                        XFS_TRANS_PERM_LOG_RES, log_count);
        if (error == ENOSPC) {
                /* flush outstanding delalloc blocks and retry */
                xfs_flush_inodes(dp);
                error = xfs_trans_reserve(tp, resblks, log_res, 0,
                                XFS_TRANS_PERM_LOG_RES, log_count);
        }
        if (error == ENOSPC) {
                /* No space at all so try a "no-allocation" reservation */
                resblks = 0;
                error = xfs_trans_reserve(tp, 0, log_res, 0,
                                XFS_TRANS_PERM_LOG_RES, log_count);
        }
        if (error) {
                cancel_flags = 0;
                goto out_trans_cancel;
        }

        xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
        unlock_dp_on_error = B_TRUE;

        /*
         * Check for directory link count overflow.
         */
        if (is_dir && dp->i_d.di_nlink >= XFS_MAXLINK) {
                error = XFS_ERROR(EMLINK);
                goto out_trans_cancel;
        }

        xfs_bmap_init(&free_list, &first_block);

        /*
         * Reserve disk quota and the inode.
         */
        error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
        if (error)
                goto out_trans_cancel;

        error = xfs_dir_canenter(tp, dp, name, resblks);
        if (error)
                goto out_trans_cancel;

        /*
         * A newly created regular or special file just has one directory
         * entry pointing to it, but a directory also has the "." entry
         * pointing to itself.
         */
1374         error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, credp,
1375                                prid, resblks > 0, &ip, &committed);
1376         if (error) {
1377                 if (error == ENOSPC)
1378                         goto out_trans_cancel;
1379                 goto out_trans_abort;
1380         }
1381
1382         /*
1383          * At this point, we've gotten a newly allocated inode.
1384          * It is locked (and joined to the transaction).
1385          */
1386         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1387
1388         /*
1389          * Now we join the directory inode to the transaction.  We do not do it
1390          * earlier because xfs_dir_ialloc might commit the previous transaction
1391          * (and release all the locks).  An error from here on will result in
1392          * the transaction cancel unlocking dp so don't do it explicitly in the
1393          * error path.
1394          */
1395         IHOLD(dp);
1396         xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1397         unlock_dp_on_error = B_FALSE;
1398
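             /*
              * Add the new inode to the parent directory.  The space the
              * inode allocation already used is subtracted from whatever
              * block reservation remains.
              */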
1399         error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1400                                         &first_block, &free_list, resblks ?
1401                                         resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1402         if (error) {
1403                 ASSERT(error != ENOSPC);
1404                 goto out_trans_abort;
1405         }
1406         xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1407         xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1408
1409         if (is_dir) {
1410                 error = xfs_dir_init(tp, ip, dp);
1411                 if (error)
1412                         goto out_bmap_cancel;
1413
1414                 error = xfs_bumplink(tp, dp);
1415                 if (error)
1416                         goto out_bmap_cancel;
1417         }
1418
1419         /*
1420          * If this is a synchronous mount, make sure that the
1421          * create transaction goes to disk before returning to
1422          * the user.
1423          */
1424         if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1425                 xfs_trans_set_sync(tp);
1426
1427         /*
1428          * Attach the dquot(s) to the inodes and modify them incore.
1429          * The IDs of the inode cannot have changed since the new
1430          * inode has been locked ever since it was created.
1431          */
1432         xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
1433
1434         /*
1435          * xfs_trans_commit normally decrements the vnode ref count
1436          * when it unlocks the inode. Since we want to return the
1437          * vnode to the caller, we bump the vnode ref count now.
1438          */
1439         IHOLD(ip);
1440
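             /*
              * Finish off the extent allocations queued on free_list;
              * this may roll the transaction, setting committed.
              */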
1441         error = xfs_bmap_finish(&tp, &free_list, &committed);
1442         if (error)
1443                 goto out_abort_rele;
1444
1445         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1446         if (error) {
1447                 IRELE(ip);
1448                 goto out_dqrele;
1449         }
1450
1451         xfs_qm_dqrele(udqp);
1452         xfs_qm_dqrele(gdqp);
1453
1454         *ipp = ip;
1455         return 0;
1456
1457  out_bmap_cancel:
1458         xfs_bmap_cancel(&free_list);
1459  out_trans_abort:
1460         cancel_flags |= XFS_TRANS_ABORT;
1461  out_trans_cancel:
1462         xfs_trans_cancel(tp, cancel_flags);
1463  out_dqrele:
1464         xfs_qm_dqrele(udqp);
1465         xfs_qm_dqrele(gdqp);
1466
1467         if (unlock_dp_on_error)
1468                 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1469  std_return:
1470         return error;
1471
1472  out_abort_rele:
1473         /*
1474          * Wait until after the current transaction is aborted to
1475          * release the inode.  This prevents recursive transactions
1476          * and deadlocks from xfs_inactive.
1477          */
1478         xfs_bmap_cancel(&free_list);
1479         cancel_flags |= XFS_TRANS_ABORT;
1480         xfs_trans_cancel(tp, cancel_flags);
1481         IRELE(ip);
1482         unlock_dp_on_error = B_FALSE;
1483         goto out_dqrele;
1484 }
1485
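     /*
      * Retry statistics for xfs_lock_inodes(), kept only on DEBUG kernels.
      */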
1486 #ifdef DEBUG
1487 int xfs_locked_n;
1488 int xfs_small_retries;
1489 int xfs_middle_retries;
1490 int xfs_lots_retries;
1491 int xfs_lock_delays;
1492 #endif
1493
1494 /*
1495  * Bump the subclass so xfs_lock_inodes() acquires each lock with
1496  * a different lockdep subclass.
1497  */
1498 static inline int
1499 xfs_lock_inumorder(int lock_mode, int subclass)
1500 {
1501         if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
1502                 lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
1503         if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
1504                 lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
1505
1506         return lock_mode;
1507 }
1508
1509 /*
1510  * The following routine will lock n inodes in the requested lock mode.
1511  * We assume the caller calls us with the inodes in i_ino order.
1512  *
1513  * We need to detect deadlock where an inode that we lock
1514  * is in the AIL and we start waiting for another inode that is locked
1515  * by a thread in a long running transaction (such as truncate). This can
1516  * result in deadlock since the long running trans might need to wait
1517  * for the inode we just locked in order to push the tail and free space
1518  * in the log.
1519  */
1520 void
1521 xfs_lock_inodes(
1522         xfs_inode_t     **ips,
1523         int             inodes,
1524         uint            lock_mode)
1525 {
1526         int             attempts = 0, i, j, try_lock;
1527         xfs_log_item_t  *lp;
1528
1529         ASSERT(ips && (inodes >= 2)); /* we need at least two */
1530
1531         try_lock = 0;
1532         i = 0;
1533
1534 again:
1535         for (; i < inodes; i++) {
1536                 ASSERT(ips[i]);
1537
1538                 if (i && (ips[i] == ips[i-1]))  /* Already locked */
1539                         continue;
1540
1541                 /*
1542                  * If try_lock is not set yet, check whether any of the
1543                  * inodes locked so far is in the AIL.
1544                  * If any are, set try_lock to be used later.
1545                  */
1546
1547                 if (!try_lock) {
1548                         for (j = (i - 1); j >= 0 && !try_lock; j--) {
1549                                 lp = (xfs_log_item_t *)ips[j]->i_itemp;
1550                                 if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
1551                                         try_lock++;
1552                                 }
1553                         }
1554                 }
1555
1556                 /*
1557                  * If any of the previous locks we have locked is in the AIL,
1558                  * we must TRY to get the second and subsequent locks. If
1559                  * we can't get any, we must release all we have
1560                  * and try again.
1561                  */
1562
1563                 if (try_lock) {
1564                         /*
1565                          * try_lock means we have an inode locked that is
1566                          * in the AIL; it can only be set after the first
1567                          * inode has been locked, so i cannot be 0 here.
1568                          */
1569                         ASSERT(i != 0);
1570                         if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
1571                                 attempts++;
1572
1573                                 /*
1574                                  * Unlock all previous guys and try again.
1575                                  * xfs_iunlock will try to push the tail
1576                                  * if the inode is in the AIL.
1577                                  */
1578
1579                                 for (j = i - 1; j >= 0; j--) {
1580
1581                                         /*
1582                                          * Check to see if we've already
1583                                          * unlocked this one.
1584                                          * Not the first one going back,
1585                                          * and the inode ptr is the same.
1586                                          */
1587                                         if ((j != (i - 1)) && ips[j] ==
1588                                                                 ips[j+1])
1589                                                 continue;
1590
1591                                         xfs_iunlock(ips[j], lock_mode);
1592                                 }
1593
1594                                 if ((attempts % 5) == 0) {
1595                                         delay(1); /* Don't just spin the CPU */
1596 #ifdef DEBUG
1597                                         xfs_lock_delays++;
1598 #endif
1599                                 }
1600                                 i = 0;
1601                                 try_lock = 0;
1602                                 goto again;
1603                         }
1604                 } else {
1605                         xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
1606                 }
1607         }
1608
1609 #ifdef DEBUG
1610         if (attempts) {
1611                 if (attempts < 5) xfs_small_retries++;
1612                 else if (attempts < 100) xfs_middle_retries++;
1613                 else xfs_lots_retries++;
1614         } else {
1615                 xfs_locked_n++;
1616         }
1617 #endif
1618 }
1619
1620 /*
1621  * xfs_lock_two_inodes() can only be used to lock one type of lock
1622  * at a time - the iolock or the ilock, but not both at once. If
1623  * we lock both at once, lockdep will report false positives saying
1624  * we have violated locking orders.
1625  */
1626 void
1627 xfs_lock_two_inodes(
1628         xfs_inode_t             *ip0,
1629         xfs_inode_t             *ip1,
1630         uint                    lock_mode)
1631 {
1632         xfs_inode_t             *temp;
1633         int                     attempts = 0;
1634         xfs_log_item_t          *lp;
1635
1636         if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
1637                 ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
1638         ASSERT(ip0->i_ino != ip1->i_ino);
1639
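             /*
              * Lock in ascending inode number order, so swap the inodes
              * if they were passed in the other order.
              */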
1640         if (ip0->i_ino > ip1->i_ino) {
1641                 temp = ip0;
1642                 ip0 = ip1;
1643                 ip1 = temp;
1644         }
1645
1646  again:
1647         xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
1648
1649         /*
1650          * If the first lock we have locked is in the AIL, we must TRY to get
1651          * the second lock. If we can't get it, we must release the first one
1652          * and try again.
1653          */
1654         lp = (xfs_log_item_t *)ip0->i_itemp;
1655         if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
1656                 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
1657                         xfs_iunlock(ip0, lock_mode);
1658                         if ((++attempts % 5) == 0)
1659                                 delay(1); /* Don't just spin the CPU */
1660                         goto again;
1661                 }
1662         } else {
1663                 xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
1664         }
1665 }
1666
1667 int
1668 xfs_remove(
1669         xfs_inode_t             *dp,
1670         struct xfs_name         *name,
1671         xfs_inode_t             *ip)
1672 {
1673         xfs_mount_t             *mp = dp->i_mount;
1674         xfs_trans_t             *tp = NULL;
1675         int                     is_dir = S_ISDIR(ip->i_d.di_mode);
1676         int                     error = 0;
1677         xfs_bmap_free_t         free_list;
1678         xfs_fsblock_t           first_block;
1679         int                     cancel_flags;
1680         int                     committed;
1681         int                     link_zero;
1682         uint                    resblks;
1683         uint                    log_count;
1684
1685         xfs_itrace_entry(dp);
1686         xfs_itrace_entry(ip);
1687
1688         if (XFS_FORCED_SHUTDOWN(mp))
1689                 return XFS_ERROR(EIO);
1690
1691         error = xfs_qm_dqattach(dp, 0);
1692         if (error)
1693                 goto std_return;
1694
1695         error = xfs_qm_dqattach(ip, 0);
1696         if (error)
1697                 goto std_return;
1698
1699         if (is_dir) {
1700                 tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
1701                 log_count = XFS_DEFAULT_LOG_COUNT;
1702         } else {
1703                 tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
1704                 log_count = XFS_REMOVE_LOG_COUNT;
1705         }
1706         cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1707
1708         /*
1709          * We try to get the real space reservation first, allowing
1710          * for directory btree deletion(s) implying possible bmap
1711          * insert(s).  If we can't get the space reservation then we
1712          * use 0 instead.  In that case the directory code avoids any
1713          * bmap btree insert(s) by trimming the LAST block from the
1714          * directory whenever such an insert would otherwise be
1715          * required.
1716          */
1717         resblks = XFS_REMOVE_SPACE_RES(mp);
1718         error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
1719                                   XFS_TRANS_PERM_LOG_RES, log_count);
1720         if (error == ENOSPC) {
1721                 resblks = 0;
1722                 error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
1723                                           XFS_TRANS_PERM_LOG_RES, log_count);
1724         }
1725         if (error) {
1726                 ASSERT(error != ENOSPC);
1727                 cancel_flags = 0;
1728                 goto out_trans_cancel;
1729         }
1730
1731         xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
1732
1733         /*
1734          * At this point, we've gotten both the directory and the entry
1735          * inodes locked.
1736          */
1737         IHOLD(ip);
1738         xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1739
1740         IHOLD(dp);
1741         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1742
1743         /*
1744          * If we're removing a directory perform some additional validation.
1745          */
1746         if (is_dir) {
1747                 ASSERT(ip->i_d.di_nlink >= 2);
1748                 if (ip->i_d.di_nlink != 2) {
1749                         error = XFS_ERROR(ENOTEMPTY);
1750                         goto out_trans_cancel;
1751                 }
1752                 if (!xfs_dir_isempty(ip)) {
1753                         error = XFS_ERROR(ENOTEMPTY);
1754                         goto out_trans_cancel;
1755                 }
1756         }
1757
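             /*
              * Remove the directory entry.  Blocks freed by shrinking the
              * directory are collected on free_list and released when the
              * transaction is finished below.
              */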
1758         xfs_bmap_init(&free_list, &first_block);
1759         error = xfs_dir_removename(tp, dp, name, ip->i_ino,
1760                                         &first_block, &free_list, resblks);
1761         if (error) {
1762                 ASSERT(error != ENOENT);
1763                 goto out_bmap_cancel;
1764         }
1765         xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1766
1767         if (is_dir) {
1768                 /*
1769                  * Drop the link from ip's "..".
1770                  */
1771                 error = xfs_droplink(tp, dp);
1772                 if (error)
1773                         goto out_bmap_cancel;
1774
1775                 /*
1776                  * Drop the "." link from ip to self.
1777                  */
1778                 error = xfs_droplink(tp, ip);
1779                 if (error)
1780                         goto out_bmap_cancel;
1781         } else {
1782                 /*
1783                  * When removing a non-directory we need to log the parent
1784                  * inode here.  For a directory this is done implicitly
1785                  * by the xfs_droplink call for the ".." entry.
1786                  */
1787                 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1788         }
1789
1790         /*
1791          * Drop the link from dp to ip.
1792          */
1793         error = xfs_droplink(tp, ip);
1794         if (error)
1795                 goto out_bmap_cancel;
1796
1797         /*
1798          * Determine if this is the last link while
1799          * we are in the transaction.
1800          */
1801         link_zero = (ip->i_d.di_nlink == 0);
1802
1803         /*
1804          * If this is a synchronous mount, make sure that the
1805          * remove transaction goes to disk before returning to
1806          * the user.
1807          */
1808         if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1809                 xfs_trans_set_sync(tp);
1810
1811         error = xfs_bmap_finish(&tp, &free_list, &committed);
1812         if (error)
1813                 goto out_bmap_cancel;
1814
1815         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1816         if (error)
1817                 goto std_return;
1818
1819         /*
1820          * If we are using filestreams, kill the stream association.
1821          * If the file is still open it may get a new one but that
1822          * will get killed on last close in xfs_close() so we don't
1823          * have to worry about that.
1824          */
1825         if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
1826                 xfs_filestream_deassociate(ip);
1827
1828         return 0;
1829
1830  out_bmap_cancel:
1831         xfs_bmap_cancel(&free_list);
1832         cancel_flags |= XFS_TRANS_ABORT;
1833  out_trans_cancel:
1834         xfs_trans_cancel(tp, cancel_flags);
1835  std_return:
1836         return error;
1837 }
1838
1839 int
1840 xfs_link(
1841         xfs_inode_t             *tdp,
1842         xfs_inode_t             *sip,
1843         struct xfs_name         *target_name)
1844 {
1845         xfs_mount_t             *mp = tdp->i_mount;
1846         xfs_trans_t             *tp;
1847         int                     error;
1848         xfs_bmap_free_t         free_list;
1849         xfs_fsblock_t           first_block;
1850         int                     cancel_flags;
1851         int                     committed;
1852         int                     resblks;
1853
1854         xfs_itrace_entry(tdp);
1855         xfs_itrace_entry(sip);
1856
1857         ASSERT(!S_ISDIR(sip->i_d.di_mode));
1858
1859         if (XFS_FORCED_SHUTDOWN(mp))
1860                 return XFS_ERROR(EIO);
1861
1862         error = xfs_qm_dqattach(sip, 0);
1863         if (error)
1864                 goto std_return;
1865
1866         error = xfs_qm_dqattach(tdp, 0);
1867         if (error)
1868                 goto std_return;
1869
1870         tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
1871         cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1872         resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1873         error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0,
1874                         XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
1875         if (error == ENOSPC) {
1876                 resblks = 0;
1877                 error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0,
1878                                 XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
1879         }
1880         if (error) {
1881                 cancel_flags = 0;
1882                 goto error_return;
1883         }
1884
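             /*
              * xfs_lock_two_inodes() takes the two locks in ascending inode
              * number order, so this cannot deadlock against other tasks
              * locking the same pair.
              */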
1885         xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
1886
1887         /*
1888          * Increment vnode ref counts since xfs_trans_commit &
1889          * xfs_trans_cancel will both unlock the inodes and
1890          * decrement the associated ref counts.
1891          */
1892         IHOLD(sip);
1893         IHOLD(tdp);
1894         xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1895         xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1896
1897         /*
1898          * If the source has too many links, we can't make any more to it.
1899          */
1900         if (sip->i_d.di_nlink >= XFS_MAXLINK) {
1901                 error = XFS_ERROR(EMLINK);
1902                 goto error_return;
1903         }
1904
1905         /*
1906          * If we are using project inheritance, we only allow hard link
1907          * creation in our tree when the project IDs are the same; else
1908          * the tree quota mechanism could be circumvented.
1909          */
1910         if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1911                      (tdp->i_d.di_projid != sip->i_d.di_projid))) {
1912                 error = XFS_ERROR(EXDEV);
1913                 goto error_return;
1914         }
1915
1916         error = xfs_dir_canenter(tp, tdp, target_name, resblks);
1917         if (error)
1918                 goto error_return;
1919
1920         xfs_bmap_init(&free_list, &first_block);
1921
1922         error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1923                                         &first_block, &free_list, resblks);
1924         if (error)
1925                 goto abort_return;
1926         xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1927         xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1928
1929         error = xfs_bumplink(tp, sip);
1930         if (error)
1931                 goto abort_return;
1932
1933         /*
1934          * If this is a synchronous mount, make sure that the
1935          * link transaction goes to disk before returning to
1936          * the user.
1937          */
1938         if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
1939                 xfs_trans_set_sync(tp);
1940         }
1941
1942         error = xfs_bmap_finish(&tp, &free_list, &committed);
1943         if (error) {
1944                 xfs_bmap_cancel(&free_list);
1945                 goto abort_return;
1946         }
1947
1948         return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1949
1950  abort_return:
1951         cancel_flags |= XFS_TRANS_ABORT;
1952  error_return:
1953         xfs_trans_cancel(tp, cancel_flags);
1954  std_return:
1955         return error;
1956 }
1957
1958 int
1959 xfs_symlink(
1960         xfs_inode_t             *dp,
1961         struct xfs_name         *link_name,
1962         const char              *target_path,
1963         mode_t                  mode,
1964         xfs_inode_t             **ipp,
1965         cred_t                  *credp)
1966 {
1967         xfs_mount_t             *mp = dp->i_mount;
1968         xfs_trans_t             *tp;
1969         xfs_inode_t             *ip;
1970         int                     error;
1971         int                     pathlen;
1972         xfs_bmap_free_t         free_list;
1973         xfs_fsblock_t           first_block;
1974         boolean_t               unlock_dp_on_error = B_FALSE;
1975         uint                    cancel_flags;
1976         int                     committed;
1977         xfs_fileoff_t           first_fsb;
1978         xfs_filblks_t           fs_blocks;
1979         int                     nmaps;
1980         xfs_bmbt_irec_t         mval[SYMLINK_MAPS];
1981         xfs_daddr_t             d;
1982         const char              *cur_chunk;
1983         int                     byte_cnt;
1984         int                     n;
1985         xfs_buf_t               *bp;
1986         xfs_prid_t              prid;
1987         struct xfs_dquot        *udqp, *gdqp;
1988         uint                    resblks;
1989
1990         *ipp = NULL;
1991         error = 0;
1992         ip = NULL;
1993         tp = NULL;
1994
1995         xfs_itrace_entry(dp);
1996
1997         if (XFS_FORCED_SHUTDOWN(mp))
1998                 return XFS_ERROR(EIO);
1999
2000         /*
2001          * Check component lengths of the target path name.
2002          * Check the total length of the target path name.
2003         pathlen = strlen(target_path);
2004         if (pathlen >= MAXPATHLEN)      /* total string too long */
2005                 return XFS_ERROR(ENAMETOOLONG);
2006
2007         udqp = gdqp = NULL;
2008         if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
2009                 prid = dp->i_d.di_projid;
2010         else
2011                 prid = (xfs_prid_t)dfltprid;
2012
2013         /*
2014          * Make sure that we have allocated dquot(s) on disk.
2015          */
2016         error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
2017                         XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
2018         if (error)
2019                 goto std_return;
2020
2021         tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
2022         cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
2023         /*
2024          * Will the symlink target fit into the inode data fork?
2025          * There can't be any attributes yet, so the whole variable part is available.
2026          */
2027         if (pathlen <= XFS_LITINO(mp))
2028                 fs_blocks = 0;
2029         else
2030                 fs_blocks = XFS_B_TO_FSB(mp, pathlen);
2031         resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
2032         error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
2033                         XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
2034         if (error == ENOSPC && fs_blocks == 0) {
2035                 resblks = 0;
2036                 error = xfs_trans_reserve(tp, 0, XFS_SYMLINK_LOG_RES(mp), 0,
2037                                 XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
2038         }
2039         if (error) {
2040                 cancel_flags = 0;
2041                 goto error_return;
2042         }
2043
2044         xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
2045         unlock_dp_on_error = B_TRUE;
2046
2047         /*
2048          * Check whether the directory allows new symlinks or not.
2049          */
2050         if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
2051                 error = XFS_ERROR(EPERM);
2052                 goto error_return;
2053         }
2054
2055         /*
2056          * Reserve disk quota: blocks and inode.
2057          */
2058         error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
2059         if (error)
2060                 goto error_return;
2061
2062         /*
2063          * Check for ability to enter directory entry, if no space reserved.
2064          */
2065         error = xfs_dir_canenter(tp, dp, link_name, resblks);
2066         if (error)
2067                 goto error_return;
2068         /*
2069          * Initialize the bmap freelist prior to calling either
2070          * bmapi or the directory create code.
2071          */
2072         xfs_bmap_init(&free_list, &first_block);
2073
2074         /*
2075          * Allocate an inode for the symlink.
2076          */
2077         error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT),
2078                                1, 0, credp, prid, resblks > 0, &ip, NULL);
2079         if (error) {
2080                 if (error == ENOSPC)
2081                         goto error_return;
2082                 goto error1;
2083         }
2084
2085         /*
2086          * An error after we've joined dp to the transaction will result in the
2087          * transaction cancel unlocking dp so don't do it explicitly in the
2088          * error path.
2089          */
2090         IHOLD(dp);
2091         xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2092         unlock_dp_on_error = B_FALSE;
2093
2094         /*
2095          * Also attach the dquot(s) to it, if applicable.
2096          */
2097         xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
2098
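             /*
              * The inode allocation has already consumed part of the block
              * reservation, so account for it before using resblks below.
              */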
2099         if (resblks)
2100                 resblks -= XFS_IALLOC_SPACE_RES(mp);
2101         /*
2102          * If the symlink will fit into the inode, write it inline.
2103          */
2104         if (pathlen <= XFS_IFORK_DSIZE(ip)) {
2105                 xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
2106                 memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
2107                 ip->i_d.di_size = pathlen;
2108
2109                 /*
2110                  * The inode was initially created in extent format.
2111                  */
2112                 ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
2113                 ip->i_df.if_flags |= XFS_IFINLINE;
2114
2115                 ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
2116                 xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
2117
2118         } else {
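                     /*
                      * The target does not fit inline: allocate blocks for
                      * it and copy the path into the buffers covering them.
                      */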
2119                 first_fsb = 0;
2120                 nmaps = SYMLINK_MAPS;
2121
2122                 error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
2123                                   XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
2124                                   &first_block, resblks, mval, &nmaps,
2125                                   &free_list, NULL);
2126                 if (error) {
2127                         goto error1;
2128                 }
2129
2130                 if (resblks)
2131                         resblks -= fs_blocks;
2132                 ip->i_d.di_size = pathlen;
2133                 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2134
2135                 cur_chunk = target_path;
2136                 for (n = 0; n < nmaps; n++) {
2137                         d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
2138                         byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
2139                         bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
2140                                                BTOBB(byte_cnt), 0);
2141                         ASSERT(bp && !XFS_BUF_GETERROR(bp));
2142                         if (pathlen < byte_cnt) {
2143                                 byte_cnt = pathlen;
2144                         }
2145                         pathlen -= byte_cnt;
2146
2147                         memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt);
2148                         cur_chunk += byte_cnt;
2149
2150                         xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1);
2151                 }
2152         }
2153
2154         /*
2155          * Create the directory entry for the symlink.
2156          */
2157         error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
2158                                         &first_block, &free_list, resblks);
2159         if (error)
2160                 goto error1;
2161         xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2162         xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2163
2164         /*
2165          * If this is a synchronous mount, make sure that the
2166          * symlink transaction goes to disk before returning to
2167          * the user.
2168          */
2169         if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
2170                 xfs_trans_set_sync(tp);
2171         }
2172
2173         /*
2174          * xfs_trans_commit normally decrements the vnode ref count
2175          * when it unlocks the inode. Since we want to return the
2176          * vnode to the caller, we bump the vnode ref count now.
2177          */
2178         IHOLD(ip);
2179
2180         error = xfs_bmap_finish(&tp, &free_list, &committed);
2181         if (error) {
2182                 goto error2;
2183         }
2184         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2185         xfs_qm_dqrele(udqp);
2186         xfs_qm_dqrele(gdqp);
2187
2188         *ipp = ip;
2189         return 0;
2190
2191  error2:
2192         IRELE(ip);
2193  error1:
2194         xfs_bmap_cancel(&free_list);
2195         cancel_flags |= XFS_TRANS_ABORT;
2196  error_return:
2197         xfs_trans_cancel(tp, cancel_flags);
2198         xfs_qm_dqrele(udqp);
2199         xfs_qm_dqrele(gdqp);
2200
2201         if (unlock_dp_on_error)
2202                 xfs_iunlock(dp, XFS_ILOCK_EXCL);
2203  std_return:
2204         return error;
2205 }
2206
2207 int
2208 xfs_set_dmattrs(
2209         xfs_inode_t     *ip,
2210         u_int           evmask,
2211         u_int16_t       state)
2212 {
2213         xfs_mount_t     *mp = ip->i_mount;
2214         xfs_trans_t     *tp;
2215         int             error;
2216
2217         if (!capable(CAP_SYS_ADMIN))
2218                 return XFS_ERROR(EPERM);
2219
2220         if (XFS_FORCED_SHUTDOWN(mp))
2221                 return XFS_ERROR(EIO);
2222
2223         tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
2224         error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
2225         if (error) {
2226                 xfs_trans_cancel(tp, 0);
2227                 return error;
2228         }
2229         xfs_ilock(ip, XFS_ILOCK_EXCL);
2230         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2231
2232         ip->i_d.di_dmevmask = evmask;
2233         ip->i_d.di_dmstate  = state;
2234
2235         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2236         IHOLD(ip);
2237         error = xfs_trans_commit(tp, 0);
2238
2239         return error;
2240 }
2241
2242 /*
2243  * xfs_alloc_file_space()
2244  *      This routine allocates disk space for the given file.
2245  *
2246  *      If alloc_type == 0, this request is for an ALLOCSP type
2247  *      request which will change the file size.  In this case, no
2248  *      DMAPI event will be generated by the call.  A TRUNCATE event
2249  *      will be generated later by xfs_setattr.
2250  *
2251  *      If alloc_type != 0, this request is for a RESVSP type
2252  *      request, and a DMAPI DM_EVENT_WRITE will be generated if the
2253  *      lower block boundary byte address is less than the file's
2254  *      length.
2255  *
2256  * RETURNS:
2257  *       0 on success
2258  *      errno on error
2259  *
2260  */
2261 STATIC int
2262 xfs_alloc_file_space(
2263         xfs_inode_t             *ip,
2264         xfs_off_t               offset,
2265         xfs_off_t               len,
2266         int                     alloc_type,
2267         int                     attr_flags)
2268 {
2269         xfs_mount_t             *mp = ip->i_mount;
2270         xfs_off_t               count;
2271         xfs_filblks_t           allocated_fsb;
2272         xfs_filblks_t           allocatesize_fsb;
2273         xfs_extlen_t            extsz, temp;
2274         xfs_fileoff_t           startoffset_fsb;
2275         xfs_fsblock_t           firstfsb;
2276         int                     nimaps;
2277         int                     bmapi_flag;
2278         int                     quota_flag;
2279         int                     rt;
2280         xfs_trans_t             *tp;
2281         xfs_bmbt_irec_t         imaps[1], *imapp;
2282         xfs_bmap_free_t         free_list;
2283         uint                    qblocks, resblks, resrtextents;
2284         int                     committed;
2285         int                     error;
2286
2287         xfs_itrace_entry(ip);
2288
2289         if (XFS_FORCED_SHUTDOWN(mp))
2290                 return XFS_ERROR(EIO);
2291
2292         error = xfs_qm_dqattach(ip, 0);
2293         if (error)
2294                 return error;
2295
2296         if (len <= 0)
2297                 return XFS_ERROR(EINVAL);
2298
2299         rt = XFS_IS_REALTIME_INODE(ip);
2300         extsz = xfs_get_extsz_hint(ip);
2301
2302         count = len;
2303         imapp = &imaps[0];
2304         nimaps = 1;
2305         bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
2306         startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
2307         allocatesize_fsb = XFS_B_TO_FSB(mp, count);
2308
2309         /*
2310          * Allocate file space until done or until there is an error
2311          */
2312         while (allocatesize_fsb && !error) {
2313                 xfs_fileoff_t   s, e;
2314
2315                 /*
2316                  * Determine space reservations for data/realtime.
2317                  */
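                     /*
                      * With an extent size hint set, round the range out to
                      * hint boundaries so the reservation covers the whole
                      * aligned allocation.
                      */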
2318                 if (unlikely(extsz)) {
2319                         s = startoffset_fsb;
2320                         do_div(s, extsz);
2321                         s *= extsz;
2322                         e = startoffset_fsb + allocatesize_fsb;
2323                         if ((temp = do_mod(startoffset_fsb, extsz)))
2324                                 e += temp;
2325                         if ((temp = do_mod(e, extsz)))
2326                                 e += extsz - temp;
2327                 } else {
2328                         s = 0;
2329                         e = allocatesize_fsb;
2330                 }
2331
2332                 if (unlikely(rt)) {
2333                         resrtextents = qblocks = (uint)(e - s);
2334                         resrtextents /= mp->m_sb.sb_rextsize;
2335                         resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
2336                         quota_flag = XFS_QMOPT_RES_RTBLKS;
2337                 } else {
2338                         resrtextents = 0;
2339                         resblks = qblocks =
2340                                 XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
2341                         quota_flag = XFS_QMOPT_RES_REGBLKS;
2342                 }
2343
2344                 /*
2345                  * Allocate and setup the transaction.
2346                  */
2347                 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
2348                 error = xfs_trans_reserve(tp, resblks,
2349                                           XFS_WRITE_LOG_RES(mp), resrtextents,
2350                                           XFS_TRANS_PERM_LOG_RES,
2351                                           XFS_WRITE_LOG_COUNT);
2352                 /*
2353                  * Check for running out of space
2354                  */
2355                 if (error) {
2356                         /*
2357                          * Free the transaction structure.
2358                          */
2359                         ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
2360                         xfs_trans_cancel(tp, 0);
2361                         break;
2362                 }
2363                 xfs_ilock(ip, XFS_ILOCK_EXCL);
2364                 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
2365                                                       0, quota_flag);
2366                 if (error)
2367                         goto error1;
2368
2369                 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2370                 xfs_trans_ihold(tp, ip);
2371
2372                 /*
2373                  * Issue the xfs_bmapi() call to allocate the blocks
2374                  */
2375                 xfs_bmap_init(&free_list, &firstfsb);
2376                 error = xfs_bmapi(tp, ip, startoffset_fsb,
2377                                   allocatesize_fsb, bmapi_flag,
2378                                   &firstfsb, 0, imapp, &nimaps,
2379                                   &free_list, NULL);
2380                 if (error) {
2381                         goto error0;
2382                 }
2383
2384                 /*
2385                  * Complete the transaction
2386                  */
2387                 error = xfs_bmap_finish(&tp, &free_list, &committed);
2388                 if (error) {
2389                         goto error0;
2390                 }
2391
2392                 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2393                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2394                 if (error) {
2395                         break;
2396                 }
2397
2398                 allocated_fsb = imapp->br_blockcount;
2399
2400                 if (nimaps == 0) {
2401                         error = XFS_ERROR(ENOSPC);
2402                         break;
2403                 }
2404
2405                 startoffset_fsb += allocated_fsb;
2406                 allocatesize_fsb -= allocated_fsb;
2407         }
2408
2409         return error;
2410
2411 error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
2412         xfs_bmap_cancel(&free_list);
2413         xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
2414
2415 error1: /* Just cancel transaction */
2416         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
2417         xfs_iunlock(ip, XFS_ILOCK_EXCL);
2418         return error;
2419 }
2420
2421 /*
2422  * Zero file bytes between startoff and endoff inclusive.
2423  * The iolock is held exclusive and no blocks are buffered.
2424  *
2425  * This function is used by xfs_free_file_space() to zero
2426  * partial blocks when the range to free is not block aligned.
2427  * When unreserving space with boundaries that are not block
2428  * aligned we round up the start and round down the end
2429  * boundaries and then use this function to zero the parts of
2430  * the blocks that got dropped during the rounding.
2431  */
2432 STATIC int
2433 xfs_zero_remaining_bytes(
2434         xfs_inode_t             *ip,
2435         xfs_off_t               startoff,
2436         xfs_off_t               endoff)
2437 {
2438         xfs_bmbt_irec_t         imap;
2439         xfs_fileoff_t           offset_fsb;
2440         xfs_off_t               lastoffset;
2441         xfs_off_t               offset;
2442         xfs_buf_t               *bp;
2443         xfs_mount_t             *mp = ip->i_mount;
2444         int                     nimap;
2445         int                     error = 0;
2446
2447         /*
2448          * Avoid doing I/O beyond eof - it's not necessary
2449          * since nothing can read beyond eof.  The space will
2450          * be zeroed when the file is extended anyway.
2451          */
2452         if (startoff >= ip->i_size)
2453                 return 0;
2454
2455         if (endoff > ip->i_size)
2456                 endoff = ip->i_size;
2457
2458         bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize,
2459                                 XFS_IS_REALTIME_INODE(ip) ?
2460                                 mp->m_rtdev_targp : mp->m_ddev_targp);
2461         if (!bp)
2462                 return XFS_ERROR(ENOMEM);
2463
2464         for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
2465                 offset_fsb = XFS_B_TO_FSBT(mp, offset);
2466                 nimap = 1;
2467                 error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0,
2468                         NULL, 0, &imap, &nimap, NULL, NULL);
2469                 if (error || nimap < 1)
2470                         break;
2471                 ASSERT(imap.br_blockcount >= 1);
2472                 ASSERT(imap.br_startoff == offset_fsb);
2473                 lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
2474                 if (lastoffset > endoff)
2475                         lastoffset = endoff;
2476                 if (imap.br_startblock == HOLESTARTBLOCK)
2477                         continue;
2478                 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
2479                 if (imap.br_state == XFS_EXT_UNWRITTEN)
2480                         continue;
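                     /*
                      * Read the block covering this offset, zero the bytes
                      * of interest in the buffer and write it straight back.
                      */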
2481                 XFS_BUF_UNDONE(bp);
2482                 XFS_BUF_UNWRITE(bp);
2483                 XFS_BUF_READ(bp);
2484                 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
2485                 xfsbdstrat(mp, bp);
2486                 error = xfs_iowait(bp);
2487                 if (error) {
2488                         xfs_ioerror_alert("xfs_zero_remaining_bytes(read)",
2489                                           mp, bp, XFS_BUF_ADDR(bp));
2490                         break;
2491                 }
2492                 memset(XFS_BUF_PTR(bp) +
2493                         (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
2494                       0, lastoffset - offset + 1);
2495                 XFS_BUF_UNDONE(bp);
2496                 XFS_BUF_UNREAD(bp);
2497                 XFS_BUF_WRITE(bp);
2498                 xfsbdstrat(mp, bp);
2499                 error = xfs_iowait(bp);
2500                 if (error) {
2501                         xfs_ioerror_alert("xfs_zero_remaining_bytes(write)",
2502                                           mp, bp, XFS_BUF_ADDR(bp));
2503                         break;
2504                 }
2505         }
2506         xfs_buf_free(bp);
2507         return error;
2508 }
2509
2510 /*
2511  * xfs_free_file_space()
2512  *      This routine frees disk space for the given file.
2513  *
2514  *      This routine is only called by xfs_change_file_space
2515  *      for an UNRESVSP type call.
2516  *
2517  * RETURNS:
2518  *       0 on success
2519  *      errno on error
2520  *
2521  */
2522 STATIC int
2523 xfs_free_file_space(
2524         xfs_inode_t             *ip,
2525         xfs_off_t               offset,
2526         xfs_off_t               len,
2527         int                     attr_flags)
2528 {
2529         int                     committed;
2530         int                     done;
2531         xfs_fileoff_t           endoffset_fsb;
2532         int                     error;
2533         xfs_fsblock_t           firstfsb;
2534         xfs_bmap_free_t         free_list;
2535         xfs_bmbt_irec_t         imap;
2536         xfs_off_t               ioffset;
2537         xfs_extlen_t            mod=0;
2538         xfs_mount_t             *mp;
2539         int                     nimap;
2540         uint                    resblks;
2541         uint                    rounding;
2542         int                     rt;
2543         xfs_fileoff_t           startoffset_fsb;
2544         xfs_trans_t             *tp;
2545         int                     need_iolock = 1;
2546
2547         mp = ip->i_mount;
2548
2549         xfs_itrace_entry(ip);
2550
2551         error = xfs_qm_dqattach(ip, 0);
2552         if (error)
2553                 return error;
2554
2555         error = 0;
2556         if (len <= 0)   /* if nothing being freed */
2557                 return error;
2558         rt = XFS_IS_REALTIME_INODE(ip);
2559         startoffset_fsb = XFS_B_TO_FSB(mp, offset);
2560         endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
2561
2562         if (attr_flags & XFS_ATTR_NOLOCK)
2563                 need_iolock = 0;
2564         if (need_iolock) {
2565                 xfs_ilock(ip, XFS_IOLOCK_EXCL);
2566                 /* wait for the completion of any pending DIOs */
2567                 xfs_ioend_wait(ip);
2568         }
2569
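             /*
              * Round the start of the range down to the larger of the
              * filesystem block size and the page size before flushing
              * and invalidating the cached pages over it.
              */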
2570         rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
2571         ioffset = offset & ~(rounding - 1);
2572
2573         if (VN_CACHED(VFS_I(ip)) != 0) {
2574                 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
2575                 if (error)
2576                         goto out_unlock_iolock;
2577         }
2578
2579         /*
2580          * Need to zero the stuff we're not freeing, on disk.
2581          * If it's a realtime file & can't use unwritten extents then we
2582          * actually need to zero the extent edges.  Otherwise xfs_bunmapi
2583          * will take care of it for us.
2584          */
2585         if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
2586                 nimap = 1;
2587                 error = xfs_bmapi(NULL, ip, startoffset_fsb,
2588                         1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
2589                 if (error)
2590                         goto out_unlock_iolock;
2591                 ASSERT(nimap == 0 || nimap == 1);
2592                 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
2593                         xfs_daddr_t     block;
2594
2595                         ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
2596                         block = imap.br_startblock;
2597                         mod = do_div(block, mp->m_sb.sb_rextsize);
2598                         if (mod)
2599                                 startoffset_fsb += mp->m_sb.sb_rextsize - mod;
2600                 }
2601                 nimap = 1;
2602                 error = xfs_bmapi(NULL, ip, endoffset_fsb - 1,
2603                         1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
2604                 if (error)
2605                         goto out_unlock_iolock;
2606                 ASSERT(nimap == 0 || nimap == 1);
2607                 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
2608                         ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
2609                         mod++;
2610                         if (mod && (mod != mp->m_sb.sb_rextsize))
2611                                 endoffset_fsb -= mod;
2612                 }
2613         }
2614         if ((done = (endoffset_fsb <= startoffset_fsb)))
2615                 /*
2616                  * One contiguous piece to clear
2617                  */
2618                 error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
2619         else {
2620                 /*
2621                  * Some full blocks, possibly two pieces to clear
2622                  */
2623                 if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
2624                         error = xfs_zero_remaining_bytes(ip, offset,
2625                                 XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
2626                 if (!error &&
2627                     XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
2628                         error = xfs_zero_remaining_bytes(ip,
2629                                 XFS_FSB_TO_B(mp, endoffset_fsb),
2630                                 offset + len - 1);
2631         }
2632
2633         /*
2634          * free file space until done or until there is an error
2635          */
2636         resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
2637         while (!error && !done) {
2638
2639                 /*
2640                  * allocate and setup the transaction. Allow this
2641                  * transaction to dip into the reserve blocks to ensure
2642                  * the freeing of the space succeeds at ENOSPC.
2643                  */
2644                 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
2645                 tp->t_flags |= XFS_TRANS_RESERVE;
2646                 error = xfs_trans_reserve(tp,
2647                                           resblks,
2648                                           XFS_WRITE_LOG_RES(mp),
2649                                           0,
2650                                           XFS_TRANS_PERM_LOG_RES,
2651                                           XFS_WRITE_LOG_COUNT);
2652
2653                 /*
2654                  * check for running out of space
2655                  */
2656                 if (error) {
2657                         /*
2658                          * Free the transaction structure.
2659                          */
2660                         ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
2661                         xfs_trans_cancel(tp, 0);
2662                         break;
2663                 }
2664                 xfs_ilock(ip, XFS_ILOCK_EXCL);
2665                 error = xfs_trans_reserve_quota(tp, mp,
2666                                 ip->i_udquot, ip->i_gdquot,
2667                                 resblks, 0, XFS_QMOPT_RES_REGBLKS);
2668                 if (error)
2669                         goto error1;
2670
2671                 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2672                 xfs_trans_ihold(tp, ip);
2673
2674                 /*
2675                  * issue the bunmapi() call to free the blocks
2676                  */
2677                 xfs_bmap_init(&free_list, &firstfsb);
2678                 error = xfs_bunmapi(tp, ip, startoffset_fsb,
2679                                   endoffset_fsb - startoffset_fsb,
2680                                   0, 2, &firstfsb, &free_list, NULL, &done);
2681                 if (error) {
2682                         goto error0;
2683                 }
2684
2685                 /*
2686                  * complete the transaction
2687                  */
2688                 error = xfs_bmap_finish(&tp, &free_list, &committed);
2689                 if (error) {
2690                         goto error0;
2691                 }
2692
2693                 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2694                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2695         }
2696
2697  out_unlock_iolock:
2698         if (need_iolock)
2699                 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
2700         return error;
2701
2702  error0:
2703         xfs_bmap_cancel(&free_list);
2704  error1:
2705         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
2706         xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
2707                     XFS_ILOCK_EXCL);
2708         return error;
2709 }
2710
2711 /*
2712  * xfs_change_file_space()
2713  *      This routine allocates or frees disk space for the given file.
2714  *      The user specified parameters are checked for alignment and size
2715  *      limitations.
2716  *
2717  * RETURNS:
2718  *       0 on success
2719  *      errno on error
2720  *
2721  */
2722 int
2723 xfs_change_file_space(
2724         xfs_inode_t     *ip,
2725         int             cmd,
2726         xfs_flock64_t   *bf,
2727         xfs_off_t       offset,
2728         int             attr_flags)
2729 {
2730         xfs_mount_t     *mp = ip->i_mount;
2731         int             clrprealloc;
2732         int             error;
2733         xfs_fsize_t     fsize;
2734         int             setprealloc;
2735         xfs_off_t       startoffset;
2736         xfs_off_t       llen;
2737         xfs_trans_t     *tp;
2738         struct iattr    iattr;
2739
2740         xfs_itrace_entry(ip);
2741
2742         if (!S_ISREG(ip->i_d.di_mode))
2743                 return XFS_ERROR(EINVAL);
2744
2745         switch (bf->l_whence) {
2746         case 0: /*SEEK_SET*/
2747                 break;
2748         case 1: /*SEEK_CUR*/
2749                 bf->l_start += offset;
2750                 break;
2751         case 2: /*SEEK_END*/
2752                 bf->l_start += ip->i_size;
2753                 break;
2754         default:
2755                 return XFS_ERROR(EINVAL);
2756         }
2757
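             /*
              * Convert l_len to the offset of the last byte in the range
              * (a non-positive length is left as is) so the limit checks
              * below are inclusive.
              */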
2758         llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len;
2759
2760         if (   (bf->l_start < 0)
2761             || (bf->l_start > XFS_MAXIOFFSET(mp))
2762             || (bf->l_start + llen < 0)
2763             || (bf->l_start + llen > XFS_MAXIOFFSET(mp)))
2764                 return XFS_ERROR(EINVAL);
2765
2766         bf->l_whence = 0;
2767
2768         startoffset = bf->l_start;
2769         fsize = ip->i_size;
2770
2771         /*
2772          * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve
2773          * file space.
2774          * These calls do NOT zero the data space allocated to the file,
2775          * nor do they change the file size.
2776          *
2777          * XFS_IOC_ALLOCSP and XFS_IOC_FREESP will allocate and free file
2778          * space.
2779          * These calls cause the new file data to be zeroed and the file
2780          * size to be changed.
2781          */
2782         setprealloc = clrprealloc = 0;
2783
2784         switch (cmd) {
2785         case XFS_IOC_RESVSP:
2786         case XFS_IOC_RESVSP64:
2787                 error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
2788                                                                 1, attr_flags);
2789                 if (error)
2790                         return error;
2791                 setprealloc = 1;
2792                 break;
2793
2794         case XFS_IOC_UNRESVSP:
2795         case XFS_IOC_UNRESVSP64:
2796                 if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
2797                                                                 attr_flags)))
2798                         return error;
2799                 break;
2800
2801         case XFS_IOC_ALLOCSP:
2802         case XFS_IOC_ALLOCSP64:
2803         case XFS_IOC_FREESP:
2804         case XFS_IOC_FREESP64:
2805                 if (startoffset > fsize) {
2806                         error = xfs_alloc_file_space(ip, fsize,
2807                                         startoffset - fsize, 0, attr_flags);
2808                         if (error)
2809                                 break;
2810                 }
2811
2812                 iattr.ia_valid = ATTR_SIZE;
2813                 iattr.ia_size = startoffset;
2814
2815                 error = xfs_setattr(ip, &iattr, attr_flags);
2816
2817                 if (error)
2818                         return error;
2819
2820                 clrprealloc = 1;
2821                 break;
2822
2823         default:
2824                 ASSERT(0);
2825                 return XFS_ERROR(EINVAL);
2826         }
2827
2828         /*
2829          * update the inode timestamp, mode, and prealloc flag bits
2830          */
2831         tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
2832
2833         if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp),
2834                                       0, 0, 0))) {
2835                 /* ASSERT(0); */
2836                 xfs_trans_cancel(tp, 0);
2837                 return error;
2838         }
2839
2840         xfs_ilock(ip, XFS_ILOCK_EXCL);
2841
2842         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2843         xfs_trans_ihold(tp, ip);
2844
2845         if ((attr_flags & XFS_ATTR_DMI) == 0) {
2846                 ip->i_d.di_mode &= ~S_ISUID;
2847
2848                 /*
2849                  * Note that we don't have to worry about mandatory
2850                  * file locking being disabled here because we only
2851                  * clear the S_ISGID bit if the Group execute bit is
2852                  * on, but if it was on then mandatory locking wouldn't
2853                  * have been enabled.
2854                  */
2855                 if (ip->i_d.di_mode & S_IXGRP)
2856                         ip->i_d.di_mode &= ~S_ISGID;
2857
2858                 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2859         }
2860         if (setprealloc)
2861                 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
2862         else if (clrprealloc)
2863                 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
2864
2865         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2866         xfs_trans_set_sync(tp);
2867
2868         error = xfs_trans_commit(tp, 0);
2869
2870         xfs_iunlock(ip, XFS_ILOCK_EXCL);
2871
2872         return error;
2873 }