/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/aio.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */
static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}
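/*
 * A minimal user-space sketch of why SEEK_END takes the glock: the
 * returned offset depends on i_size, which another node may have just
 * grown, so the size must be refreshed under a shared lock first.
 * (The path below is illustrative only.)
 *
 *	int fd = open("/mnt/gfs2/file", O_RDONLY);
 *	off_t end = lseek(fd, 0, SEEK_END);	// cluster-coherent size
 *	if (end == (off_t)-1)
 *		perror("lseek");
 */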
/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}
/**
 * fsflags_cvt - convert between fsflags values and GFS2 flag values
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
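/*
 * Worked example: with the fsflags_to_gfs2 table below, each set bit N
 * of @val selects table[N], so
 *
 *	fsflags_cvt(fsflags_to_gfs2, FS_IMMUTABLE_FL | FS_NOATIME_FL)
 *
 * (bits 4 and 7) yields GFS2_DIF_IMMUTABLE | GFS2_DIF_NOATIME.
 */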
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}
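/*
 * User-space sketch of the GETFLAGS path above (file path illustrative):
 *
 *	unsigned int fsflags;
 *	int fd = open("/mnt/gfs2/file", O_RDONLY);
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &fsflags) == 0 &&
 *	    (fsflags & FS_JOURNAL_DATA_FL))
 *		puts("journaled data");
 */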
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}
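/*
 * User-space sketch of the SETFLAGS path, the ioctl behind "chattr +j"
 * (file path illustrative). Note the read-modify-write pattern:
 *
 *	unsigned int fsflags;
 *	int fd = open("/mnt/gfs2/file", O_RDONLY);
 *	ioctl(fd, FS_IOC_GETFLAGS, &fsflags);
 *	fsflags |= FS_JOURNAL_DATA_FL;		// becomes GFS2_DIF_JDATA
 *	ioctl(fd, FS_IOC_SETFLAGS, &fsflags);
 */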
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}

	return -ENOTTY;
}
/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */
static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	atomic_set(&ip->i_res->rs_sizehint, hint);
}
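/*
 * Worked example: a 1MiB write on a 4KiB-block filesystem
 * (sb_bsize = 4096, sb_bsize_shift = 12) gives
 *
 *	blks = (1048576 + 4095) >> 12 = 256
 *
 * so the allocator is hinted that roughly 256 blocks will be needed,
 * clamped to INT_MAX for very large requests.
 */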
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's OK too.
 */
static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = get_write_access(inode);
	if (ret)
		goto out;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out_write_access;

	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_unlock;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out_write_access:
	put_write_access(inode);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}
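/*
 * User-space sketch of what drives this path: the first store to a
 * clean, shared, writable mapping faults and lands here so that backing
 * blocks exist before the page is dirtied (path illustrative):
 *
 *	int fd = open("/mnt/gfs2/file", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ|PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	// write fault -> gfs2_page_mkwrite()
 */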
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};
/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}
/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (fp == NULL)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size up to date for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rs_delete(ip, &inode->i_writecount);
	return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}
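/*
 * User-space view (path illustrative): fdatasync() reaches here with
 * datasync == 1, so a pure timestamp change does not force a metadata
 * sync, while fsync() passes datasync == 0 and syncs both:
 *
 *	int fd = open("/mnt/gfs2/file", O_WRONLY);
 *	write(fd, buf, len);
 *	if (fdatasync(fd))
 *		perror("fdatasync");
 */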
/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (file->f_flags & O_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_write_iter(iocb, from);
}
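/*
 * User-space sketch (path illustrative): with O_APPEND, the brief
 * shared-glock cycle above refreshes i_size so the append lands at the
 * current cluster-wide end of file:
 *
 *	int fd = open("/mnt/gfs2/log", O_WRONLY | O_APPEND);
 *	write(fd, "entry\n", 6);	// offset taken from refreshed size
 */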
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	loff_t size = len;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
		i_size_write(inode, offset + size);

	mark_inode_dirty(inode);

out:
	brelse(dibh);
	return error;
}
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
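/*
 * Worked example, assuming typical 4KiB-block geometry (sd_inptrs = 509,
 * sd_diptrs = 483; exact values depend on block size) with
 * sd_max_height = 3 and rd_free_clone = 1000 free blocks:
 *
 *	max_data = 1000 - 3 * (3 - 1) = 994
 *	loop: tmp = 994 > 483 -> tmp = DIV_ROUND_UP(994, 509) = 2,
 *	      max_data = 992; then tmp = 2 <= 483, so stop
 *
 * leaving 992 data blocks with 8 blocks of metadata headroom.
 */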
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;
	struct gfs2_holder gh;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	mutex_lock(&inode->i_mutex);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_size_hint(file, offset, len);

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}
		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_unlock;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		ap.target = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
				&max_bytes, &data_blocks, &ind_blocks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (error == 0)
		error = generic_write_sync(file, pos, count);
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	mutex_unlock(&inode->i_mutex);
	return error;
}
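/*
 * User-space sketch (path illustrative): only FALLOC_FL_KEEP_SIZE is
 * honoured beyond the default mode, so preallocating space without
 * growing the visible file size looks like:
 *
 *	int fd = open("/mnt/gfs2/file", O_WRONLY);
 *	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 30))
 *		perror("fallocate");	// other modes get EOPNOTSUPP
 */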
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			posix_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
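/*
 * User-space sketch (path illustrative): POSIX byte-range locks taken
 * via fcntl() are forwarded to dlm_posix_lock() and friends, making
 * them effective across the whole cluster:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 4096,
 *	};
 *	int fd = open("/mnt/gfs2/file", O_RDWR);
 *	if (fcntl(fd, F_SETLKW, &fl))	// blocks until granted cluster-wide
 *		perror("fcntl");
 */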
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
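/*
 * User-space sketch (path illustrative): flock() maps LOCK_EX to
 * LM_ST_EXCLUSIVE and LOCK_SH to LM_ST_SHARED via do_flock(), while
 * LOCK_NB becomes an LM_FLAG_TRY_1CB trylock that fails with EAGAIN:
 *
 *	int fd = open("/mnt/gfs2/file", O_RDONLY);
 *	if (flock(fd, LOCK_SH | LOCK_NB) && errno == EWOULDBLOCK)
 *		fputs("held exclusively elsewhere\n", stderr);
 */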
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.write		= new_sync_write,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.write		= new_sync_write,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};