/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (origin) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, origin);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * fsflags_cvt - convert between fsflags values and GFS2 flag values
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */

static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while(val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}

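/*
 * Worked example, following directly from the loop above: converting
 * FS_IMMUTABLE_FL, which is bit 4 of the fsflags word, scans val one bit
 * at a time and ORs in the table entry for each set bit, so the result
 * is fsflags_to_gfs2[4] == GFS2_DIF_IMMUTABLE.
 */
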
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */

static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

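/*
 * Note the on-disk update pattern above: take the inode glock
 * exclusively, start a transaction sized for one dinode (RES_DINODE),
 * add the dinode buffer to the transaction, then modify it. Changing
 * GFS2_DIF_JDATA additionally forces dirty data out first, since the
 * journaling mode of already-cached pages cannot be switched under them.
 */
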
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}

	return -ENOTTY;
}

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}

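/*
 * Worked example of the loop above, assuming 4KB pages and a 1KB block
 * size: PAGE_CACHE_SHIFT - i_blkbits == 2, so each page spans four
 * filesystem blocks and gfs2_block_map() may be called up to four times,
 * or fewer when one call maps several contiguous blocks at once
 * (bh.b_size larger than one block).
 */
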
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = gfs2_rs_alloc(ip);
	if (ret) {
		/* Holder not initialised yet, so only drop freeze protection */
		sb_end_pagefault(inode->i_sb);
		return block_page_mkwrite_return(ret);
	}

	atomic_set(&ip->i_res->rs_sizehint,
		   PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_unlock;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_on_page_writeback(page);
	}
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

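/*
 * Note on the unwind labels above: resources are acquired in the order
 * inode glock -> quota lock -> block reservation -> transaction -> page
 * lock, and the error labels release them in exactly the reverse order,
 * so a failure at any step cleans up only what was actually taken.
 */
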
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - map a file into a process's address space
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		if (error == 0) {
			file_accessed(file);
			gfs2_glock_dq(&i_gh);
		}
		gfs2_holder_uninit(&i_gh);
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if ((file->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1))
		gfs2_rs_delete(ip);

	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}

/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	size_t writesize = iov_length(iov, nr_segs);
	struct dentry *dentry = file->f_dentry;
	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
	struct gfs2_sbd *sdp;
	int ret;

	sdp = GFS2_SB(file->f_mapping->host);
	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	atomic_set(&ip->i_res->rs_sizehint, writesize >> sdp->sd_sb.sb_bsize_shift);
	if (file->f_flags & O_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}

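/*
 * The O_APPEND dance above works because taking and then dropping the
 * shared glock forces this node to refresh its cached inode, including
 * i_size, if another node has changed the file since the glock was last
 * held; the write then proceeds with an up-to-date end-of-file offset.
 */
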
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	loff_t size = len;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
		i_size_write(inode, offset + size);

	mark_inode_dirty(inode);

out:
	brelse(dibh);
	return error;
}

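/*
 * The buffer_zeronew() check above verifies that gfs2_block_map()
 * honoured the set_buffer_zeronew() request to zero the newly allocated
 * extent; if it did not, stale on-disk contents could become visible in
 * the file, so the chunk is abandoned with -EIO rather than exposing
 * old data.
 */
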
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

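/*
 * Rough arithmetic for the loop above, assuming a 4KB block size (so,
 * approximately, sd_diptrs ~ 483 direct pointers in the dinode and
 * sd_inptrs ~ 509 pointers per indirect block; the exact values depend
 * on the on-disk header sizes): for max_data of one million blocks, the
 * first pass deducts ceil(1000000/509) = 1966 single-indirect blocks,
 * the second ceil(1966/509) = 4 double-indirect blocks, and then
 * tmp <= sd_diptrs terminates the loop, leaving max_data as the
 * data-block budget net of metadata.
 */
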
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	atomic_set(&ip->i_res->rs_sizehint, len >> sdp->sd_sb.sb_bsize_shift);

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}
		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_unlock;
retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
				&max_bytes, &data_blocks, &ind_blocks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (error == 0)
		error = generic_write_sync(file, pos, count);
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

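/*
 * Why GL_EXACT | GL_NOCACHE above: the flock state must map one-to-one
 * onto the glock state (exact, not merely compatible), and the glock
 * should be dropped rather than cached on release, so other cluster
 * nodes see the flock go away promptly. For non-blocking requests
 * (no IS_SETLKW), LM_FLAG_TRY makes the request fail fast, and the
 * resulting GLR_TRYFAILED is translated to -EAGAIN, which is what
 * flock(LOCK_NB) callers expect.
 */
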
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = gfs2_file_aio_write,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_release,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.setlease = gfs2_setlease,
	.fallocate = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.readdir = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_release,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
	.llseek = default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

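/*
 * The *_nolock operation tables below omit .lock and .flock (and fall
 * back to the generic lease code) because they are installed when
 * cluster locking is not in use, i.e. when the filesystem is mounted
 * with the lock_nolock protocol or the localflocks option, in which
 * case plain VFS-local locking suffices.
 */
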
const struct file_operations gfs2_file_fops_nolock = {
	.llseek = gfs2_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = gfs2_file_aio_write,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_release,
	.fsync = gfs2_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.setlease = generic_setlease,
	.fallocate = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.readdir = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_release,
	.fsync = gfs2_fsync,
	.llseek = default_llseek,
};