1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * File open, close, extend, truncate
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/capability.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/uio.h>
33 #include <linux/sched.h>
34 #include <linux/splice.h>
35 #include <linux/mount.h>
36 #include <linux/writeback.h>
37 #include <linux/falloc.h>
38 #include <linux/quotaops.h>
39 #include <linux/blkdev.h>
41 #include <cluster/masklog.h>
49 #include "extent_map.h"
62 #include "refcounttree.h"
63 #include "ocfs2_trace.h"
65 #include "buffer_head_io.h"
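/*
 * Per-open private data: allocated on open, freed on release. It holds
 * the mutex and lock resource used for cluster-aware file locking on
 * this open file.
 */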
67 static int ocfs2_init_file_private(struct inode *inode, struct file *file)
69 struct ocfs2_file_private *fp;
71 fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
76 mutex_init(&fp->fp_mutex);
77 ocfs2_file_lock_res_init(&fp->fp_flock, fp);
78 file->private_data = fp;
83 static void ocfs2_free_file_private(struct inode *inode, struct file *file)
85 struct ocfs2_file_private *fp = file->private_data;
86 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
89 ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
90 ocfs2_lock_res_free(&fp->fp_flock);
92 file->private_data = NULL;
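/*
 * Opening a file checks, under ip_lock, that the inode has not been
 * deleted by another node before bumping the open count, and then sets
 * up the per-open private data.
 */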
96 static int ocfs2_file_open(struct inode *inode, struct file *file)
99 int mode = file->f_flags;
100 struct ocfs2_inode_info *oi = OCFS2_I(inode);
102 trace_ocfs2_file_open(inode, file, file->f_path.dentry,
103 (unsigned long long)OCFS2_I(inode)->ip_blkno,
104 file->f_path.dentry->d_name.len,
105 file->f_path.dentry->d_name.name, mode);
107 if (file->f_mode & FMODE_WRITE)
108 dquot_initialize(inode);
110 spin_lock(&oi->ip_lock);
112 /* Check that the inode hasn't been wiped from disk by another
113 * node. If it hasn't then we're safe as long as we hold the
114 * spin lock until our increment of open count. */
115 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
116 spin_unlock(&oi->ip_lock);
123 oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
126 spin_unlock(&oi->ip_lock);
128 status = ocfs2_init_file_private(inode, file);
131 * We want to set open count back if we're failing the
134 spin_lock(&oi->ip_lock);
136 spin_unlock(&oi->ip_lock);
143 static int ocfs2_file_release(struct inode *inode, struct file *file)
145 struct ocfs2_inode_info *oi = OCFS2_I(inode);
147 spin_lock(&oi->ip_lock);
148 if (!--oi->ip_open_count)
149 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
151 trace_ocfs2_file_release(inode, file, file->f_path.dentry,
153 file->f_path.dentry->d_name.len,
154 file->f_path.dentry->d_name.name,
156 spin_unlock(&oi->ip_lock);
158 ocfs2_free_file_private(inode, file);
163 static int ocfs2_dir_open(struct inode *inode, struct file *file)
165 return ocfs2_init_file_private(inode, file);
168 static int ocfs2_dir_release(struct inode *inode, struct file *file)
170 ocfs2_free_file_private(inode, file);
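/*
 * fsync/fdatasync: write back the dirty pages in the given range, then
 * wait for the jbd2 transaction recorded in i_sync_tid/i_datasync_tid to
 * commit, issuing a block device flush ourselves when the journal will
 * not send a barrier for us.
 */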
174 static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
178 struct inode *inode = file->f_mapping->host;
179 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
180 struct ocfs2_inode_info *oi = OCFS2_I(inode);
181 journal_t *journal = osb->journal->j_journal;
184 bool needs_barrier = false;
186 trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
187 OCFS2_I(inode)->ip_blkno,
188 file->f_path.dentry->d_name.len,
189 file->f_path.dentry->d_name.name,
190 (unsigned long long)datasync);
192 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
195 err = filemap_write_and_wait_range(inode->i_mapping, start, end);
199 commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
200 if (journal->j_flags & JBD2_BARRIER &&
201 !jbd2_trans_will_send_data_barrier(journal, commit_tid))
202 needs_barrier = true;
203 err = jbd2_complete_transaction(journal, commit_tid);
205 ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
213 return (err < 0) ? -EIO : 0;
216 int ocfs2_should_update_atime(struct inode *inode,
217 struct vfsmount *vfsmnt)
220 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
222 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
225 if ((inode->i_flags & S_NOATIME) ||
226 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
230 * We can be called with no vfsmnt structure - NFSD will sometimes do this.
233 * Note that our action here is different from touch_atime() -
234 * if we can't tell whether this is a noatime mount, then we
235 * don't know whether to trust the value of s_atime_quantum.
240 if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
241 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
244 if (vfsmnt->mnt_flags & MNT_RELATIME) {
245 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
246 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
253 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
259 int ocfs2_update_inode_atime(struct inode *inode,
260 struct buffer_head *bh)
263 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
265 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
267 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
268 if (IS_ERR(handle)) {
269 ret = PTR_ERR(handle);
274 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
275 OCFS2_JOURNAL_ACCESS_WRITE);
282 * Don't use ocfs2_mark_inode_dirty() here as we don't always
283 * have i_mutex to guard against concurrent changes to other
286 inode->i_atime = CURRENT_TIME;
287 di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
288 di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
289 ocfs2_update_inode_fsync_trans(handle, inode, 0);
290 ocfs2_journal_dirty(handle, bh);
293 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
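/*
 * Set the new size on the in-core inode and write it out through
 * ocfs2_mark_inode_dirty(). The caller provides the running transaction
 * and the dinode buffer.
 */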
298 static int ocfs2_set_inode_size(handle_t *handle,
300 struct buffer_head *fe_bh,
305 i_size_write(inode, new_i_size);
306 inode->i_blocks = ocfs2_inode_sector_count(inode);
307 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
309 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
319 int ocfs2_simple_size_update(struct inode *inode,
320 struct buffer_head *di_bh,
324 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
325 handle_t *handle = NULL;
327 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
328 if (IS_ERR(handle)) {
329 ret = PTR_ERR(handle);
334 ret = ocfs2_set_inode_size(handle, inode, di_bh,
339 ocfs2_update_inode_fsync_trans(handle, inode, 0);
340 ocfs2_commit_trans(osb, handle);
345 static int ocfs2_cow_file_pos(struct inode *inode,
346 struct buffer_head *fe_bh,
350 u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
351 unsigned int num_clusters = 0;
352 unsigned int ext_flags = 0;
355 * If the new offset is cluster-aligned, there is nothing for
356 * ocfs2_zero_range_for_truncate to fill, so there is no need to CoW either.
359 if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
362 status = ocfs2_get_clusters(inode, cpos, &phys,
363 &num_clusters, &ext_flags);
369 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
372 return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
378 static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
380 struct buffer_head *fe_bh,
385 struct ocfs2_dinode *di;
389 * We need to CoW the cluster that contains the offset if it is reflinked,
390 * since we will call ocfs2_zero_range_for_truncate later, which will
391 * write zeros from the offset to the end of the cluster.
393 status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
399 /* TODO: This needs to actually orphan the inode in this
402 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
403 if (IS_ERR(handle)) {
404 status = PTR_ERR(handle);
409 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
410 OCFS2_JOURNAL_ACCESS_WRITE);
417 * Do this before setting i_size.
419 cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
420 status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
427 i_size_write(inode, new_i_size);
428 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
430 di = (struct ocfs2_dinode *) fe_bh->b_data;
431 di->i_size = cpu_to_le64(new_i_size);
432 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
433 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
434 ocfs2_update_inode_fsync_trans(handle, inode, 0);
436 ocfs2_journal_dirty(handle, fe_bh);
439 ocfs2_commit_trans(osb, handle);
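/*
 * Truncate the file down to new_i_size. Pages beyond the new size are
 * unmapped and dropped, inline-data inodes are shrunk in place, and
 * everything else goes through ocfs2_orphan_for_truncate() followed by
 * ocfs2_commit_truncate() so recovery can finish the job after a crash.
 */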
444 static int ocfs2_truncate_file(struct inode *inode,
445 struct buffer_head *di_bh,
449 struct ocfs2_dinode *fe = NULL;
450 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
452 /* We trust di_bh because it comes from ocfs2_inode_lock(), which
453 * already validated it */
454 fe = (struct ocfs2_dinode *) di_bh->b_data;
456 trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
457 (unsigned long long)le64_to_cpu(fe->i_size),
458 (unsigned long long)new_i_size);
460 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
461 "Inode %llu, inode i_size = %lld != di "
462 "i_size = %llu, i_flags = 0x%x\n",
463 (unsigned long long)OCFS2_I(inode)->ip_blkno,
465 (unsigned long long)le64_to_cpu(fe->i_size),
466 le32_to_cpu(fe->i_flags));
468 if (new_i_size > le64_to_cpu(fe->i_size)) {
469 trace_ocfs2_truncate_file_error(
470 (unsigned long long)le64_to_cpu(fe->i_size),
471 (unsigned long long)new_i_size);
477 down_write(&OCFS2_I(inode)->ip_alloc_sem);
479 ocfs2_resv_discard(&osb->osb_la_resmap,
480 &OCFS2_I(inode)->ip_la_data_resv);
483 * The inode lock forced other nodes to sync and drop their
484 * pages, which (correctly) happens even if we have a truncate
485 * without allocation change - ocfs2 cluster sizes can be much
486 * greater than page size, so we have to truncate them anyway.
489 unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
490 truncate_inode_pages(inode->i_mapping, new_i_size);
492 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
493 status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
494 i_size_read(inode), 1);
498 goto bail_unlock_sem;
501 /* alright, we're going to need to do a full blown alloc size
502 * change. Orphan the inode so that recovery can complete the
503 * truncate if necessary. This does the task of marking
505 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
508 goto bail_unlock_sem;
511 status = ocfs2_commit_truncate(osb, inode, di_bh);
514 goto bail_unlock_sem;
517 /* TODO: orphan dir cleanup here. */
519 up_write(&OCFS2_I(inode)->ip_alloc_sem);
522 if (!status && OCFS2_I(inode)->ip_clusters == 0)
523 status = ocfs2_try_remove_refcount_tree(inode, di_bh);
529 * extend file allocation only here.
530 * we'll update all the disk stuff, and oip->alloc_size
532 * expect stuff to be locked, a transaction started and enough data /
533 * metadata reservations in the contexts.
535 * Will return -EAGAIN, and a reason if a restart is needed.
536 * If passed in, *reason will always be set, even in error.
538 int ocfs2_add_inode_data(struct ocfs2_super *osb,
543 struct buffer_head *fe_bh,
545 struct ocfs2_alloc_context *data_ac,
546 struct ocfs2_alloc_context *meta_ac,
547 enum ocfs2_alloc_restarted *reason_ret)
550 struct ocfs2_extent_tree et;
552 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
553 ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
554 clusters_to_add, mark_unwritten,
555 data_ac, meta_ac, reason_ret);
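/*
 * Extend the inode's allocation by clusters_to_add clusters starting at
 * logical_start. The transaction is restarted (or extended) whenever
 * ocfs2_add_inode_data() returns -EAGAIN with a restart reason, and the
 * quota reservation is trimmed back to what was actually allocated.
 */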
560 static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
561 u32 clusters_to_add, int mark_unwritten)
564 int restart_func = 0;
567 struct buffer_head *bh = NULL;
568 struct ocfs2_dinode *fe = NULL;
569 handle_t *handle = NULL;
570 struct ocfs2_alloc_context *data_ac = NULL;
571 struct ocfs2_alloc_context *meta_ac = NULL;
572 enum ocfs2_alloc_restarted why = RESTART_NONE;
573 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
574 struct ocfs2_extent_tree et;
578 * Unwritten extents only exist for file systems which support holes.
581 BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
583 status = ocfs2_read_inode_block(inode, &bh);
588 fe = (struct ocfs2_dinode *) bh->b_data;
591 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
593 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
594 status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
601 credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
602 handle = ocfs2_start_trans(osb, credits);
603 if (IS_ERR(handle)) {
604 status = PTR_ERR(handle);
610 restarted_transaction:
611 trace_ocfs2_extend_allocation(
612 (unsigned long long)OCFS2_I(inode)->ip_blkno,
613 (unsigned long long)i_size_read(inode),
614 le32_to_cpu(fe->i_clusters), clusters_to_add,
617 status = dquot_alloc_space_nodirty(inode,
618 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
623 /* reserve a write to the file entry early on - that way if we
624 * run out of credits in the allocation path, we can still rollback.
626 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
627 OCFS2_JOURNAL_ACCESS_WRITE);
633 prev_clusters = OCFS2_I(inode)->ip_clusters;
635 status = ocfs2_add_inode_data(osb,
645 if ((status < 0) && (status != -EAGAIN)) {
646 if (status != -ENOSPC)
650 ocfs2_update_inode_fsync_trans(handle, inode, 1);
651 ocfs2_journal_dirty(handle, bh);
653 spin_lock(&OCFS2_I(inode)->ip_lock);
654 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
655 spin_unlock(&OCFS2_I(inode)->ip_lock);
656 /* Release unused quota reservation */
657 dquot_free_space(inode,
658 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
661 if (why != RESTART_NONE && clusters_to_add) {
662 if (why == RESTART_META) {
666 BUG_ON(why != RESTART_TRANS);
668 status = ocfs2_allocate_extend_trans(handle, 1);
670 /* handle still has to be committed at this point. */
676 goto restarted_transaction;
680 trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
681 le32_to_cpu(fe->i_clusters),
682 (unsigned long long)le64_to_cpu(fe->i_size),
683 OCFS2_I(inode)->ip_clusters,
684 (unsigned long long)i_size_read(inode));
687 if (status < 0 && did_quota)
688 dquot_free_space(inode,
689 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
691 ocfs2_commit_trans(osb, handle);
695 ocfs2_free_alloc_context(data_ac);
699 ocfs2_free_alloc_context(meta_ac);
702 if ((!status) && restart_func) {
713 * While a write will already be ordering the data, a truncate will not.
714 * Thus, we need to explicitly order the zeroed pages.
716 static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
717 struct buffer_head *di_bh)
719 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
720 handle_t *handle = NULL;
723 if (!ocfs2_should_order_data(inode))
726 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
727 if (IS_ERR(handle)) {
733 ret = ocfs2_jbd2_file_inode(handle, inode);
739 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
740 OCFS2_JOURNAL_ACCESS_WRITE);
743 ocfs2_update_inode_fsync_trans(handle, inode, 1);
748 ocfs2_commit_trans(osb, handle);
749 handle = ERR_PTR(ret);
754 /* Some parts of this taken from generic_cont_expand, which turned out
755 * to be too fragile to do exactly what we need without us having to
756 * worry about recursive locking in ->write_begin() and ->write_end(). */
757 static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
758 u64 abs_to, struct buffer_head *di_bh)
760 struct address_space *mapping = inode->i_mapping;
762 unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
765 unsigned zero_from, zero_to, block_start, block_end;
766 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
768 BUG_ON(abs_from >= abs_to);
769 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
770 BUG_ON(abs_from & (inode->i_blkbits - 1));
772 handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
773 if (IS_ERR(handle)) {
774 ret = PTR_ERR(handle);
778 page = find_or_create_page(mapping, index, GFP_NOFS);
782 goto out_commit_trans;
785 /* Get the offsets within the page that we want to zero */
786 zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
787 zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
789 zero_to = PAGE_CACHE_SIZE;
791 trace_ocfs2_write_zero_page(
792 (unsigned long long)OCFS2_I(inode)->ip_blkno,
793 (unsigned long long)abs_from,
794 (unsigned long long)abs_to,
795 index, zero_from, zero_to);
797 /* We know that zero_from is block aligned */
798 for (block_start = zero_from; block_start < zero_to;
799 block_start = block_end) {
800 block_end = block_start + (1 << inode->i_blkbits);
803 * block_start is block-aligned. Bump it by one to force
804 * __block_write_begin and block_commit_write to zero the whole block.
807 ret = __block_write_begin(page, block_start + 1, 0,
815 /* must not update i_size! */
816 ret = block_commit_write(page, block_start + 1,
825 * fs-writeback will release dirty pages without the page lock
826 * whose offsets are beyond the inode size; the release happens at
827 * block_write_full_page().
829 i_size_write(inode, abs_to);
830 inode->i_blocks = ocfs2_inode_sector_count(inode);
831 di->i_size = cpu_to_le64((u64)i_size_read(inode));
832 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
833 di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
834 di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
835 di->i_mtime_nsec = di->i_ctime_nsec;
837 ocfs2_journal_dirty(handle, di_bh);
838 ocfs2_update_inode_fsync_trans(handle, inode, 1);
843 page_cache_release(page);
846 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
852 * Find the next range to zero. We do this in terms of bytes because
853 * that's what ocfs2_zero_extend() wants, and it is dealing with the
854 * pagecache. We may return multiple extents.
856 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
857 * needs to be zeroed. range_start and range_end return the next zeroing
858 * range. A subsequent call should pass the previous range_end as its
859 * zero_start. If range_end is 0, there's nothing to do.
861 * Unwritten extents are skipped over. Refcounted extents are CoW'd.
863 static int ocfs2_zero_extend_get_range(struct inode *inode,
864 struct buffer_head *di_bh,
865 u64 zero_start, u64 zero_end,
866 u64 *range_start, u64 *range_end)
868 int rc = 0, needs_cow = 0;
869 u32 p_cpos, zero_clusters = 0;
871 zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
872 u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
873 unsigned int num_clusters = 0;
874 unsigned int ext_flags = 0;
876 while (zero_cpos < last_cpos) {
877 rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
878 &num_clusters, &ext_flags);
884 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
885 zero_clusters = num_clusters;
886 if (ext_flags & OCFS2_EXT_REFCOUNTED)
891 zero_cpos += num_clusters;
893 if (!zero_clusters) {
898 while ((zero_cpos + zero_clusters) < last_cpos) {
899 rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
900 &p_cpos, &num_clusters,
907 if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
909 if (ext_flags & OCFS2_EXT_REFCOUNTED)
911 zero_clusters += num_clusters;
913 if ((zero_cpos + zero_clusters) > last_cpos)
914 zero_clusters = last_cpos - zero_cpos;
917 rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
918 zero_clusters, UINT_MAX);
925 *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
926 *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
927 zero_cpos + zero_clusters);
934 * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
935 * has made sure that the entire range needs zeroing.
937 static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
938 u64 range_end, struct buffer_head *di_bh)
942 u64 zero_pos = range_start;
944 trace_ocfs2_zero_extend_range(
945 (unsigned long long)OCFS2_I(inode)->ip_blkno,
946 (unsigned long long)range_start,
947 (unsigned long long)range_end);
948 BUG_ON(range_start >= range_end);
950 while (zero_pos < range_end) {
951 next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
952 if (next_pos > range_end)
953 next_pos = range_end;
954 rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
962 * Very large extends have the potential to lock up
963 * the cpu for extended periods of time.
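/*
 * Zero the already-allocated, written regions between the current i_size
 * and zero_to_size, page by page. Holes and unwritten extents are skipped
 * by ocfs2_zero_extend_get_range().
 */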
971 int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
975 u64 zero_start, range_start = 0, range_end = 0;
976 struct super_block *sb = inode->i_sb;
978 zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
979 trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
980 (unsigned long long)zero_start,
981 (unsigned long long)i_size_read(inode));
982 while (zero_start < zero_to_size) {
983 ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
994 if (range_start < zero_start)
995 range_start = zero_start;
996 if (range_end > zero_to_size)
997 range_end = zero_to_size;
999 ret = ocfs2_zero_extend_range(inode, range_start,
1005 zero_start = range_end;
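/*
 * Extend the allocation to cover new_i_size without leaving holes, then
 * zero from the old i_size up to zero_to so no stale data is exposed.
 */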
1011 int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
1012 u64 new_i_size, u64 zero_to)
1015 u32 clusters_to_add;
1016 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1019 * Only quota files call this without a bh, and they can't be refcounted.
1022 BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
1023 BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1025 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1026 if (clusters_to_add < oi->ip_clusters)
1027 clusters_to_add = 0;
1029 clusters_to_add -= oi->ip_clusters;
1031 if (clusters_to_add) {
1032 ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
1033 clusters_to_add, 0);
1041 * Call this even if we don't add any clusters to the tree. We
1042 * still need to zero the area between the old i_size and the
1045 ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1053 static int ocfs2_extend_file(struct inode *inode,
1054 struct buffer_head *di_bh,
1058 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1062 /* setattr sometimes calls us like this. */
1063 if (new_i_size == 0)
1066 if (i_size_read(inode) == new_i_size)
1068 BUG_ON(new_i_size < i_size_read(inode));
1071 * The alloc sem blocks people in read/write from reading our
1072 * allocation until we're done changing it. We depend on
1073 * i_mutex to block other extend/truncate calls while we're
1074 * here. We even have to hold it for sparse files because there
1075 * might be some tail zeroing.
1077 down_write(&oi->ip_alloc_sem);
1079 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1081 * We can optimize small extends by keeping the inode's inline data.
1084 if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1085 up_write(&oi->ip_alloc_sem);
1086 goto out_update_size;
1089 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1091 up_write(&oi->ip_alloc_sem);
1097 if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1098 ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1100 ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1103 up_write(&oi->ip_alloc_sem);
1111 ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
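/*
 * setattr entry point. Size changes are performed under the rw and inode
 * cluster locks; uid/gid changes transfer quota inside a transaction
 * sized for the inode update plus the quota writes.
 */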
1119 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1121 int status = 0, size_change;
1122 struct inode *inode = dentry->d_inode;
1123 struct super_block *sb = inode->i_sb;
1124 struct ocfs2_super *osb = OCFS2_SB(sb);
1125 struct buffer_head *bh = NULL;
1126 handle_t *handle = NULL;
1127 struct dquot *transfer_to[MAXQUOTAS] = { };
1130 trace_ocfs2_setattr(inode, dentry,
1131 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1132 dentry->d_name.len, dentry->d_name.name,
1133 attr->ia_valid, attr->ia_mode,
1134 from_kuid(&init_user_ns, attr->ia_uid),
1135 from_kgid(&init_user_ns, attr->ia_gid));
1137 /* ensuring we don't even attempt to truncate a symlink */
1138 if (S_ISLNK(inode->i_mode))
1139 attr->ia_valid &= ~ATTR_SIZE;
1141 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1142 | ATTR_GID | ATTR_UID | ATTR_MODE)
1143 if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1146 status = inode_change_ok(inode, attr);
1150 if (is_quota_modification(inode, attr))
1151 dquot_initialize(inode);
1152 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1154 status = ocfs2_rw_lock(inode, 1);
1161 status = ocfs2_inode_lock(inode, &bh, 1);
1163 if (status != -ENOENT)
1165 goto bail_unlock_rw;
1169 status = inode_newsize_ok(inode, attr->ia_size);
1173 inode_dio_wait(inode);
1175 if (i_size_read(inode) >= attr->ia_size) {
1176 if (ocfs2_should_order_data(inode)) {
1177 status = ocfs2_begin_ordered_truncate(inode,
1182 status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1184 status = ocfs2_extend_file(inode, bh, attr->ia_size);
1186 if (status != -ENOSPC)
1193 if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
1194 (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
1196 * Gather pointers to quota structures so that allocation /
1197 * freeing of quota structures happens here and not inside
1198 * dquot_transfer() where we have problems with lock ordering
1200 if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
1201 && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1202 OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1203 transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
1204 if (!transfer_to[USRQUOTA]) {
1209 if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
1210 && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1211 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1212 transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
1213 if (!transfer_to[GRPQUOTA]) {
1218 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1219 2 * ocfs2_quota_trans_credits(sb));
1220 if (IS_ERR(handle)) {
1221 status = PTR_ERR(handle);
1225 status = __dquot_transfer(inode, transfer_to);
1229 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1230 if (IS_ERR(handle)) {
1231 status = PTR_ERR(handle);
1237 setattr_copy(inode, attr);
1238 mark_inode_dirty(inode);
1240 status = ocfs2_mark_inode_dirty(handle, inode, bh);
1245 ocfs2_commit_trans(osb, handle);
1247 ocfs2_inode_unlock(inode, 1);
1250 ocfs2_rw_unlock(inode, 1);
1254 /* Release quota pointers in case we acquired them */
1255 for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
1256 dqput(transfer_to[qtype]);
1258 if (!status && attr->ia_valid & ATTR_MODE) {
1259 status = posix_acl_chmod(inode, inode->i_mode);
1267 int ocfs2_getattr(struct vfsmount *mnt,
1268 struct dentry *dentry,
1271 struct inode *inode = dentry->d_inode;
1272 struct super_block *sb = dentry->d_inode->i_sb;
1273 struct ocfs2_super *osb = sb->s_fs_info;
1276 err = ocfs2_inode_revalidate(dentry);
1283 generic_fillattr(inode, stat);
1285 /* We set the blksize from the cluster size for performance */
1286 stat->blksize = osb->s_clustersize;
1292 int ocfs2_permission(struct inode *inode, int mask)
1296 if (mask & MAY_NOT_BLOCK)
1299 ret = ocfs2_inode_lock(inode, NULL, 0);
1306 ret = generic_permission(inode, mask);
1308 ocfs2_inode_unlock(inode, 0);
1313 static int __ocfs2_write_remove_suid(struct inode *inode,
1314 struct buffer_head *bh)
1318 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1319 struct ocfs2_dinode *di;
1321 trace_ocfs2_write_remove_suid(
1322 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1325 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1326 if (IS_ERR(handle)) {
1327 ret = PTR_ERR(handle);
1332 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1333 OCFS2_JOURNAL_ACCESS_WRITE);
1339 inode->i_mode &= ~S_ISUID;
1340 if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1341 inode->i_mode &= ~S_ISGID;
1343 di = (struct ocfs2_dinode *) bh->b_data;
1344 di->i_mode = cpu_to_le16(inode->i_mode);
1345 ocfs2_update_inode_fsync_trans(handle, inode, 0);
1347 ocfs2_journal_dirty(handle, bh);
1350 ocfs2_commit_trans(osb, handle);
1356 * Will look for holes and unwritten extents in the range starting at
1357 * pos for count bytes (inclusive).
1359 static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1363 unsigned int extent_flags;
1364 u32 cpos, clusters, extent_len, phys_cpos;
1365 struct super_block *sb = inode->i_sb;
1367 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1368 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1371 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1378 if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1383 if (extent_len > clusters)
1384 extent_len = clusters;
1386 clusters -= extent_len;
1393 static int ocfs2_write_remove_suid(struct inode *inode)
1396 struct buffer_head *bh = NULL;
1398 ret = ocfs2_read_inode_block(inode, &bh);
1404 ret = __ocfs2_write_remove_suid(inode, bh);
1411 * Allocate enough extents to cover the region starting at byte offset
1412 * start for len bytes. Existing extents are skipped; any extents
1413 * added are marked as "unwritten".
1415 static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1419 u32 cpos, phys_cpos, clusters, alloc_size;
1420 u64 end = start + len;
1421 struct buffer_head *di_bh = NULL;
1423 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1424 ret = ocfs2_read_inode_block(inode, &di_bh);
1431 * Nothing to do if the requested reservation range
1432 * fits within the inode.
1434 if (ocfs2_size_fits_inline_data(di_bh, end))
1437 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1445 * We consider both start and len to be inclusive.
1447 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1448 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1452 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1460 * Hole or existing extent len can be arbitrary, so
1461 * cap it to our own allocation request.
1463 if (alloc_size > clusters)
1464 alloc_size = clusters;
1468 * We already have an allocation at this
1469 * region so we can safely skip it.
1474 ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1483 clusters -= alloc_size;
1494 * Truncate a byte range, avoiding pages within partial clusters. This
1495 * preserves those pages for the zeroing code to write to.
1497 static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1500 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1502 struct address_space *mapping = inode->i_mapping;
1504 start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1505 end = byte_start + byte_len;
1506 end = end & ~(osb->s_clustersize - 1);
1509 unmap_mapping_range(mapping, start, end - start, 0);
1510 truncate_inode_pages_range(mapping, start, end - 1);
1514 static int ocfs2_zero_partial_clusters(struct inode *inode,
1518 u64 tmpend, end = start + len;
1519 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1520 unsigned int csize = osb->s_clustersize;
1524 * The "start" and "end" values are NOT necessarily part of
1525 * the range whose allocation is being deleted. Rather, this
1526 * is what the user passed in with the request. We must zero
1527 * partial clusters here. There's no need to worry about
1528 * physical allocation - the zeroing code knows to skip holes.
1530 trace_ocfs2_zero_partial_clusters(
1531 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1532 (unsigned long long)start, (unsigned long long)end);
1535 * If both edges are on a cluster boundary then there's no
1536 * zeroing required as the region is part of the allocation to be truncated away.
1539 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1542 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1543 if (IS_ERR(handle)) {
1544 ret = PTR_ERR(handle);
1550 * We want to get the byte offset of the end of the 1st cluster.
1552 tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1556 trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
1557 (unsigned long long)tmpend);
1559 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1565 * This may make start and end equal, but the zeroing
1566 * code will skip any work in that case so there's no
1567 * need to catch it up here.
1569 start = end & ~(osb->s_clustersize - 1);
1571 trace_ocfs2_zero_partial_clusters_range2(
1572 (unsigned long long)start, (unsigned long long)end);
1574 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1578 ocfs2_update_inode_fsync_trans(handle, inode, 1);
1580 ocfs2_commit_trans(osb, handle);
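/*
 * Scan the extent list from the end and return the index of the last
 * record whose cpos is below 'pos', i.e. the record covering or preceding
 * that cluster offset.
 */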
1585 static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1588 struct ocfs2_extent_rec *rec = NULL;
1590 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1592 rec = &el->l_recs[i];
1594 if (le32_to_cpu(rec->e_cpos) < pos)
1602 * Helper to calculate the punching position and length in one run. We handle the
1603 * following three cases in order:
1605 * - remove the entire record
1606 * - remove a partial record
1607 * - no record needs to be removed (hole-punching completed)
1609 static void ocfs2_calc_trunc_pos(struct inode *inode,
1610 struct ocfs2_extent_list *el,
1611 struct ocfs2_extent_rec *rec,
1612 u32 trunc_start, u32 *trunc_cpos,
1613 u32 *trunc_len, u32 *trunc_end,
1614 u64 *blkno, int *done)
1619 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1621 if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1623 * remove an entire extent record.
1625 *trunc_cpos = le32_to_cpu(rec->e_cpos);
1627 * Skip holes if any.
1629 if (range < *trunc_end)
1631 *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1632 *blkno = le64_to_cpu(rec->e_blkno);
1633 *trunc_end = le32_to_cpu(rec->e_cpos);
1634 } else if (range > trunc_start) {
1636 * remove a partial extent record, which means we're
1637 * removing the last extent record.
1639 *trunc_cpos = trunc_start;
1643 if (range < *trunc_end)
1645 *trunc_len = *trunc_end - trunc_start;
1646 coff = trunc_start - le32_to_cpu(rec->e_cpos);
1647 *blkno = le64_to_cpu(rec->e_blkno) +
1648 ocfs2_clusters_to_blocks(inode->i_sb, coff);
1649 *trunc_end = trunc_start;
1652 * There are two possibilities here:
1654 * - the last record has been removed
1655 * - trunc_start was within a hole
1657 * Either way, hole punching is complete.
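/*
 * Punch a hole: zero the partial clusters at both edges of the range,
 * then walk the extent tree right-to-left removing every whole cluster
 * between trunc_start and trunc_end and queueing it for deallocation.
 */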
1665 static int ocfs2_remove_inode_range(struct inode *inode,
1666 struct buffer_head *di_bh, u64 byte_start,
1669 int ret = 0, flags = 0, done = 0, i;
1670 u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1672 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1673 struct ocfs2_cached_dealloc_ctxt dealloc;
1674 struct address_space *mapping = inode->i_mapping;
1675 struct ocfs2_extent_tree et;
1676 struct ocfs2_path *path = NULL;
1677 struct ocfs2_extent_list *el = NULL;
1678 struct ocfs2_extent_rec *rec = NULL;
1679 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1680 u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1682 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1683 ocfs2_init_dealloc_ctxt(&dealloc);
1685 trace_ocfs2_remove_inode_range(
1686 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1687 (unsigned long long)byte_start,
1688 (unsigned long long)byte_len);
1693 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1694 ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1695 byte_start + byte_len, 0);
1701 * There's no need to get fancy with the page cache
1702 * truncate of an inline-data inode. We're talking
1703 * about less than a page here, which will be cached
1704 * in the dinode buffer anyway.
1706 unmap_mapping_range(mapping, 0, 0, 0);
1707 truncate_inode_pages(mapping, 0);
1712 * For reflinks, we may need to CoW two clusters which might be
1713 * partially zeroed later, if the hole's start and end offsets fall
1714 * within one cluster (i.e. they are not exactly aligned to the cluster size).
1717 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
1719 ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1725 ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1732 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1733 trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1734 cluster_in_el = trunc_end;
1736 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1742 path = ocfs2_new_path_from_et(&et);
1749 while (trunc_end > trunc_start) {
1751 ret = ocfs2_find_path(INODE_CACHE(inode), path,
1758 el = path_leaf_el(path);
1760 i = ocfs2_find_rec(el, trunc_end);
1762 * Need to go to previous extent block.
1765 if (path->p_tree_depth == 0)
1768 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1777 * We've reached the leftmost extent block;
1778 * it's safe to leave.
1780 if (cluster_in_el == 0)
1784 * The 'pos' we searched for the previous extent block is
1785 * always one cluster less than the actual trunc_end.
1787 trunc_end = cluster_in_el + 1;
1789 ocfs2_reinit_path(path, 1);
1794 rec = &el->l_recs[i];
1796 ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1797 &trunc_len, &trunc_end, &blkno, &done);
1801 flags = rec->e_flags;
1802 phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1804 ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1805 phys_cpos, trunc_len, flags,
1806 &dealloc, refcount_loc, false);
1812 cluster_in_el = trunc_end;
1814 ocfs2_reinit_path(path, 1);
1817 ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1820 ocfs2_free_path(path);
1821 ocfs2_schedule_truncate_log_flush(osb, 1);
1822 ocfs2_run_deallocs(osb, &dealloc);
1828 * Parts of this function taken from xfs_change_file_space()
1830 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1831 loff_t f_pos, unsigned int cmd,
1832 struct ocfs2_space_resv *sr,
1838 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1839 struct buffer_head *di_bh = NULL;
1841 unsigned long long max_off = inode->i_sb->s_maxbytes;
1843 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1846 mutex_lock(&inode->i_mutex);
1849 * This prevents concurrent writes on other nodes
1851 ret = ocfs2_rw_lock(inode, 1);
1857 ret = ocfs2_inode_lock(inode, &di_bh, 1);
1863 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1865 goto out_inode_unlock;
1868 switch (sr->l_whence) {
1869 case 0: /*SEEK_SET*/
1871 case 1: /*SEEK_CUR*/
1872 sr->l_start += f_pos;
1874 case 2: /*SEEK_END*/
1875 sr->l_start += i_size_read(inode);
1879 goto out_inode_unlock;
1883 llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1886 || sr->l_start > max_off
1887 || (sr->l_start + llen) < 0
1888 || (sr->l_start + llen) > max_off) {
1890 goto out_inode_unlock;
1892 size = sr->l_start + sr->l_len;
1894 if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
1895 cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
1896 if (sr->l_len <= 0) {
1898 goto out_inode_unlock;
1902 if (file && should_remove_suid(file->f_path.dentry)) {
1903 ret = __ocfs2_write_remove_suid(inode, di_bh);
1906 goto out_inode_unlock;
1910 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1912 case OCFS2_IOC_RESVSP:
1913 case OCFS2_IOC_RESVSP64:
1915 * This takes unsigned offsets, but the signed ones we
1916 * pass have been checked against overflow above.
1918 ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1921 case OCFS2_IOC_UNRESVSP:
1922 case OCFS2_IOC_UNRESVSP64:
1923 ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1929 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1932 goto out_inode_unlock;
1936 * We update c/mtime for these changes
1938 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1939 if (IS_ERR(handle)) {
1940 ret = PTR_ERR(handle);
1942 goto out_inode_unlock;
1945 if (change_size && i_size_read(inode) < size)
1946 i_size_write(inode, size);
1948 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1949 ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1953 if (file && (file->f_flags & O_SYNC))
1956 ocfs2_commit_trans(osb, handle);
1960 ocfs2_inode_unlock(inode, 1);
1962 ocfs2_rw_unlock(inode, 1);
1965 mutex_unlock(&inode->i_mutex);
1969 int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1970 struct ocfs2_space_resv *sr)
1972 struct inode *inode = file_inode(file);
1973 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1976 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1977 !ocfs2_writes_unwritten_extents(osb))
1979 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1980 !ocfs2_sparse_alloc(osb))
1983 if (!S_ISREG(inode->i_mode))
1986 if (!(file->f_mode & FMODE_WRITE))
1989 ret = mnt_want_write_file(file);
1992 ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1993 mnt_drop_write_file(file);
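/*
 * fallocate() reuses the space-reservation helper: a normal allocation is
 * treated like OCFS2_IOC_RESVSP64, and FALLOC_FL_PUNCH_HOLE like
 * OCFS2_IOC_UNRESVSP64. FALLOC_FL_KEEP_SIZE suppresses the i_size update.
 */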
1997 static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
2000 struct inode *inode = file_inode(file);
2001 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2002 struct ocfs2_space_resv sr;
2003 int change_size = 1;
2004 int cmd = OCFS2_IOC_RESVSP64;
2006 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2008 if (!ocfs2_writes_unwritten_extents(osb))
2011 if (mode & FALLOC_FL_KEEP_SIZE)
2014 if (mode & FALLOC_FL_PUNCH_HOLE)
2015 cmd = OCFS2_IOC_UNRESVSP64;
2018 sr.l_start = (s64)offset;
2019 sr.l_len = (s64)len;
2021 return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2025 int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2029 unsigned int extent_flags;
2030 u32 cpos, clusters, extent_len, phys_cpos;
2031 struct super_block *sb = inode->i_sb;
2033 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2034 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
2035 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2038 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2039 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2042 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2049 if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2054 if (extent_len > clusters)
2055 extent_len = clusters;
2057 clusters -= extent_len;
2064 static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2066 int blockmask = inode->i_sb->s_blocksize - 1;
2067 loff_t final_size = pos + count;
2069 if ((pos & blockmask) || (final_size & blockmask))
2074 static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
2076 loff_t pos, size_t count,
2080 struct buffer_head *di_bh = NULL;
2081 u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2083 ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2085 ret = ocfs2_inode_lock(inode, &di_bh, 1);
2093 ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
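/*
 * Checks done under the inode cluster lock before a write is allowed to
 * proceed: clear setuid/setgid, CoW any refcounted range we are about to
 * overwrite, and decide whether an O_DIRECT write can really go direct or
 * must fall back to buffered I/O.
 */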
2101 static int ocfs2_prepare_inode_for_write(struct file *file,
2108 int ret = 0, meta_level = 0;
2109 struct dentry *dentry = file->f_path.dentry;
2110 struct inode *inode = dentry->d_inode;
2111 loff_t saved_pos = 0, end;
2114 * We start with a read level meta lock and only jump to an ex
2115 * if we need to make modifications here.
2118 ret = ocfs2_inode_lock(inode, NULL, meta_level);
2125 /* Clear suid / sgid if necessary. We do this here
2126 * instead of later in the write path because
2127 * remove_suid() calls ->setattr without any hint that
2128 * we may have already done our cluster locking. Since
2129 * ocfs2_setattr() *must* take cluster locks to
2130 * proceed, this will lead us to recursively lock the
2131 * inode. There's also the dinode i_size state which
2132 * can be lost via setattr during extending writes (we
2133 * set inode->i_size at the end of a write). */
2134 if (should_remove_suid(dentry)) {
2135 if (meta_level == 0) {
2136 ocfs2_inode_unlock(inode, meta_level);
2141 ret = ocfs2_write_remove_suid(inode);
2148 /* work on a copy of ppos until we're sure that we won't have
2149 * to recalculate it due to relocking. */
2151 saved_pos = i_size_read(inode);
2155 end = saved_pos + count;
2157 ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
2159 ocfs2_inode_unlock(inode, meta_level);
2162 ret = ocfs2_prepare_inode_for_refcount(inode,
2179 * Skip the O_DIRECT checks if we don't need them.
2182 if (!direct_io || !(*direct_io))
2186 * There's no sane way to do direct writes to an inode with inline data.
2189 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2195 * Allowing concurrent direct writes means
2196 * i_size changes wouldn't be synchronized, so
2197 * one node could wind up truncating another
2200 if (end > i_size_read(inode)) {
2206 * We don't fill holes during direct io, so
2207 * check for them here. If any are found, the
2208 * caller will have to retake some cluster
2209 * locks and initiate the io as buffered.
2211 ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
2224 trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2225 saved_pos, appending, count,
2226 direct_io, has_refcount);
2228 if (meta_level >= 0)
2229 ocfs2_inode_unlock(inode, meta_level);
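/*
 * Write path. Lock ordering is i_mutex -> rw cluster lock, and for
 * O_DIRECT with full coherency the inode cluster lock is taken and
 * dropped to flush other nodes' caches. Unaligned direct I/O is further
 * serialized on ip_unaligned_aio.
 */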
2235 static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2236 struct iov_iter *from)
2238 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
2239 int can_do_direct, has_refcount = 0;
2240 ssize_t written = 0;
2241 size_t count = iov_iter_count(from);
2242 loff_t old_size, *ppos = &iocb->ki_pos;
2244 struct file *file = iocb->ki_filp;
2245 struct inode *inode = file_inode(file);
2246 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2247 int full_coherency = !(osb->s_mount_opt &
2248 OCFS2_MOUNT_COHERENCY_BUFFERED);
2249 int unaligned_dio = 0;
2251 trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
2252 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2253 file->f_path.dentry->d_name.len,
2254 file->f_path.dentry->d_name.name,
2255 (unsigned int)from->nr_segs); /* GRRRRR */
2257 if (iocb->ki_nbytes == 0)
2260 appending = file->f_flags & O_APPEND ? 1 : 0;
2261 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
2263 mutex_lock(&inode->i_mutex);
2265 ocfs2_iocb_clear_sem_locked(iocb);
2268 /* to match setattr's i_mutex -> rw_lock ordering */
2271 /* communicate with ocfs2_dio_end_io */
2272 ocfs2_iocb_set_sem_locked(iocb);
2276 * Concurrent O_DIRECT writes are allowed with
2277 * the mount option "coherency=buffered".
2279 rw_level = (!direct_io || full_coherency);
2281 ret = ocfs2_rw_lock(inode, rw_level);
2288 * O_DIRECT writes with "coherency=full" need to take EX cluster
2289 * inode_lock to guarantee coherency.
2291 if (direct_io && full_coherency) {
2293 * We need to take and drop the inode lock to force
2294 * other nodes to drop their caches. Buffered I/O
2295 * already does this in write_begin().
2297 ret = ocfs2_inode_lock(inode, NULL, 1);
2303 ocfs2_inode_unlock(inode, 1);
2306 can_do_direct = direct_io;
2307 ret = ocfs2_prepare_inode_for_write(file, ppos,
2308 iocb->ki_nbytes, appending,
2309 &can_do_direct, &has_refcount);
2315 if (direct_io && !is_sync_kiocb(iocb))
2316 unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_nbytes,
2320 * We can't complete the direct I/O as requested, fall back to buffered I/O.
2323 if (direct_io && !can_do_direct) {
2324 ocfs2_rw_unlock(inode, rw_level);
2333 if (unaligned_dio) {
2335 * Wait on previous unaligned aio to complete before proceeding.
2338 mutex_lock(&OCFS2_I(inode)->ip_unaligned_aio);
2339 /* Mark the iocb as needing an unlock in ocfs2_dio_end_io */
2340 ocfs2_iocb_set_unaligned_aio(iocb);
2344 * To later detect whether a journal commit for sync writes is
2345 * necessary, we sample i_size, and cluster count here.
2347 old_size = i_size_read(inode);
2348 old_clusters = OCFS2_I(inode)->ip_clusters;
2350 /* communicate with ocfs2_dio_end_io */
2351 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2353 ret = generic_write_checks(file, ppos, &count,
2354 S_ISBLK(inode->i_mode));
2358 iov_iter_truncate(from, count);
2360 written = generic_file_direct_write(iocb, from, *ppos);
2366 current->backing_dev_info = file->f_mapping->backing_dev_info;
2367 written = generic_perform_write(file, from, *ppos);
2368 if (likely(written >= 0))
2369 iocb->ki_pos = *ppos + written;
2370 current->backing_dev_info = NULL;
2374 /* buffered aio wouldn't have proper lock coverage today */
2375 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2377 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2378 ((file->f_flags & O_DIRECT) && !direct_io)) {
2379 ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
2385 ret = jbd2_journal_force_commit(osb->journal->j_journal);
2391 ret = filemap_fdatawait_range(file->f_mapping, *ppos,
2396 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2397 * function pointer which is called when o_direct io completes so that
2398 * it can unlock our rw lock.
2399 * Unfortunately there are error cases which call end_io and others
2400 * that don't, so we don't have to unlock the rw_lock if either an
2401 * async dio is going to do it in the future or an end_io after an
2402 * error has already done it.
2404 if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2410 if (unaligned_dio) {
2411 ocfs2_iocb_clear_unaligned_aio(iocb);
2412 mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
2417 ocfs2_rw_unlock(inode, rw_level);
2421 ocfs2_iocb_clear_sem_locked(iocb);
2423 mutex_unlock(&inode->i_mutex);
2430 static ssize_t ocfs2_file_splice_read(struct file *in,
2432 struct pipe_inode_info *pipe,
2436 int ret = 0, lock_level = 0;
2437 struct inode *inode = file_inode(in);
2439 trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2440 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2441 in->f_path.dentry->d_name.len,
2442 in->f_path.dentry->d_name.name, len);
2445 * See the comment in ocfs2_file_read_iter()
2447 ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
2452 ocfs2_inode_unlock(inode, lock_level);
2454 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
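/*
 * Read path. Buffered reads are protected by ->readpage() locking;
 * O_DIRECT reads take the rw cluster lock so they cannot race a truncate
 * on another node. Atime is updated via the inode lock before reading.
 */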
2460 static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2461 struct iov_iter *to)
2463 int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
2464 struct file *filp = iocb->ki_filp;
2465 struct inode *inode = file_inode(filp);
2467 trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
2468 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2469 filp->f_path.dentry->d_name.len,
2470 filp->f_path.dentry->d_name.name,
2471 to->nr_segs); /* GRRRRR */
2480 ocfs2_iocb_clear_sem_locked(iocb);
2483 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2484 * need locks to protect pending reads from racing with truncate.
2486 if (filp->f_flags & O_DIRECT) {
2488 ocfs2_iocb_set_sem_locked(iocb);
2490 ret = ocfs2_rw_lock(inode, 0);
2496 /* communicate with ocfs2_dio_end_io */
2497 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2501 * We're fine letting folks race truncates and extending
2502 * writes with read across the cluster, just like they can
2503 * locally. Hence no rw_lock during read.
2505 * Take and drop the meta data lock to update inode fields
2506 * like i_size. This gives the checks down in
2507 * generic_file_read_iter() a chance of actually working.
2509 ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
2514 ocfs2_inode_unlock(inode, lock_level);
2516 ret = generic_file_read_iter(iocb, to);
2517 trace_generic_file_aio_read_ret(ret);
2519 /* buffered aio wouldn't have proper lock coverage today */
2520 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2522 /* see ocfs2_file_write_iter */
2523 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2530 ocfs2_iocb_clear_sem_locked(iocb);
2533 ocfs2_rw_unlock(inode, rw_level);
2538 /* See generic_file_llseek_unlocked(). */
2539 static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2541 struct inode *inode = file->f_mapping->host;
2544 mutex_lock(&inode->i_mutex);
2550 /* SEEK_END requires the OCFS2 inode lock for the file
2551 * because it references the file's size.
2553 ret = ocfs2_inode_lock(inode, NULL, 0);
2558 offset += i_size_read(inode);
2559 ocfs2_inode_unlock(inode, 0);
2563 offset = file->f_pos;
2566 offset += file->f_pos;
2570 ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2579 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2582 mutex_unlock(&inode->i_mutex);
2588 const struct inode_operations ocfs2_file_iops = {
2589 .setattr = ocfs2_setattr,
2590 .getattr = ocfs2_getattr,
2591 .permission = ocfs2_permission,
2592 .setxattr = generic_setxattr,
2593 .getxattr = generic_getxattr,
2594 .listxattr = ocfs2_listxattr,
2595 .removexattr = generic_removexattr,
2596 .fiemap = ocfs2_fiemap,
2597 .get_acl = ocfs2_iop_get_acl,
2598 .set_acl = ocfs2_iop_set_acl,
2601 const struct inode_operations ocfs2_special_file_iops = {
2602 .setattr = ocfs2_setattr,
2603 .getattr = ocfs2_getattr,
2604 .permission = ocfs2_permission,
2605 .get_acl = ocfs2_iop_get_acl,
2606 .set_acl = ocfs2_iop_set_acl,
2610 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2611 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2613 const struct file_operations ocfs2_fops = {
2614 .llseek = ocfs2_file_llseek,
2615 .read = new_sync_read,
2616 .write = new_sync_write,
2618 .fsync = ocfs2_sync_file,
2619 .release = ocfs2_file_release,
2620 .open = ocfs2_file_open,
2621 .read_iter = ocfs2_file_read_iter,
2622 .write_iter = ocfs2_file_write_iter,
2623 .unlocked_ioctl = ocfs2_ioctl,
2624 #ifdef CONFIG_COMPAT
2625 .compat_ioctl = ocfs2_compat_ioctl,
2628 .flock = ocfs2_flock,
2629 .splice_read = ocfs2_file_splice_read,
2630 .splice_write = iter_file_splice_write,
2631 .fallocate = ocfs2_fallocate,
2634 const struct file_operations ocfs2_dops = {
2635 .llseek = generic_file_llseek,
2636 .read = generic_read_dir,
2637 .iterate = ocfs2_readdir,
2638 .fsync = ocfs2_sync_file,
2639 .release = ocfs2_dir_release,
2640 .open = ocfs2_dir_open,
2641 .unlocked_ioctl = ocfs2_ioctl,
2642 #ifdef CONFIG_COMPAT
2643 .compat_ioctl = ocfs2_compat_ioctl,
2646 .flock = ocfs2_flock,
2650 * POSIX-lockless variants of our file_operations.
2652 * These will be used if the underlying cluster stack does not support
2653 * posix file locking, if the user passes the "localflocks" mount
2654 * option, or if we have a local-only fs.
2656 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2657 * so we still want it in the case of no stack support for
2658 * plocks. Internally, it will do the right thing when asked to ignore plock requests that are unsupported in the cluster stack.
2661 const struct file_operations ocfs2_fops_no_plocks = {
2662 .llseek = ocfs2_file_llseek,
2663 .read = new_sync_read,
2664 .write = new_sync_write,
2666 .fsync = ocfs2_sync_file,
2667 .release = ocfs2_file_release,
2668 .open = ocfs2_file_open,
2669 .read_iter = ocfs2_file_read_iter,
2670 .write_iter = ocfs2_file_write_iter,
2671 .unlocked_ioctl = ocfs2_ioctl,
2672 #ifdef CONFIG_COMPAT
2673 .compat_ioctl = ocfs2_compat_ioctl,
2675 .flock = ocfs2_flock,
2676 .splice_read = ocfs2_file_splice_read,
2677 .splice_write = iter_file_splice_write,
2678 .fallocate = ocfs2_fallocate,
2681 const struct file_operations ocfs2_dops_no_plocks = {
2682 .llseek = generic_file_llseek,
2683 .read = generic_read_dir,
2684 .iterate = ocfs2_readdir,
2685 .fsync = ocfs2_sync_file,
2686 .release = ocfs2_dir_release,
2687 .open = ocfs2_dir_open,
2688 .unlocked_ioctl = ocfs2_ioctl,
2689 #ifdef CONFIG_COMPAT
2690 .compat_ioctl = ocfs2_compat_ioctl,
2692 .flock = ocfs2_flock,