2 * linux/fs/9p/vfs_file.c
4 * This file contains vfs file ops for 9P2000.
6 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
7 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to:
20 * Free Software Foundation
21 * 51 Franklin Street, Fifth Floor
22 * Boston, MA 02111-1301 USA
26 #include <linux/module.h>
27 #include <linux/errno.h>
29 #include <linux/sched.h>
30 #include <linux/file.h>
31 #include <linux/stat.h>
32 #include <linux/string.h>
33 #include <linux/inet.h>
34 #include <linux/list.h>
35 #include <linux/pagemap.h>
36 #include <linux/utsname.h>
37 #include <asm/uaccess.h>
38 #include <linux/idr.h>
39 #include <net/9p/9p.h>
40 #include <net/9p/client.h>
47 static const struct vm_operations_struct v9fs_file_vm_ops;
50 * v9fs_file_open - open a file (or directory)
51 * @inode: inode to be opened
52 * @file: file being opened
56 int v9fs_file_open(struct inode *inode, struct file *file)
59 struct v9fs_inode *v9inode;
60 struct v9fs_session_info *v9ses;
64 p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
65 v9inode = V9FS_I(inode);
66 v9ses = v9fs_inode2v9ses(inode);
67 if (v9fs_proto_dotl(v9ses))
68 omode = v9fs_open_to_dotl_flags(file->f_flags);
70 omode = v9fs_uflags2omode(file->f_flags,
71 v9fs_proto_dotu(v9ses));
72 fid = file->private_data;
74 fid = v9fs_fid_clone(file->f_path.dentry);
78 err = p9_client_open(fid, omode);
83 if ((file->f_flags & O_APPEND) &&
84 (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
85 generic_file_llseek(file, 0, SEEK_END);
88 file->private_data = fid;
89 mutex_lock(&v9inode->v_mutex);
90 if (v9ses->cache && !v9inode->writeback_fid &&
91 ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
93 * clone a fid and add it to writeback_fid
94 * we do it during open time instead of
95 * page dirty time via write_begin/page_mkwrite
96 * because we want write after unlink usecase
99 fid = v9fs_writeback_fid(file->f_path.dentry);
102 mutex_unlock(&v9inode->v_mutex);
105 v9inode->writeback_fid = (void *) fid;
107 mutex_unlock(&v9inode->v_mutex);
109 v9fs_cache_inode_set_cookie(inode, file);
112 p9_client_clunk(file->private_data);
113 file->private_data = NULL;
118 * v9fs_file_lock - lock a file (or directory)
119 * @filp: file to be locked
121 * @fl: file lock structure
123 * Bugs: this looks like a local only lock, we should extend into 9P
124 * by using open exclusive
/*
 * Legacy (non-dotl) lock op: local-only POSIX locking; see the "Bugs" note
 * above -- nothing is sent to the server.  Sampled excerpt: the return
 * statements and closing brace are not visible here.
 */
127 static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
130 struct inode *inode = file_inode(filp);
132 p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
134 /* No mandatory locks */
135 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
/* Before taking a lock, push dirty pages out and drop cached pages. */
138 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
139 filemap_write_and_wait(inode->i_mapping);
140 invalidate_mapping_pages(&inode->i_data, 0, -1);
/*
 * Take/release a POSIX lock locally, then mirror it to the server via a
 * 9P2000.L Tlock, retrying while the server reports P9_LOCK_BLOCKED for a
 * blocking (SETLKW) request.  On server failure the local lock is reverted.
 * Sampled excerpt: some declarations, branches and returns are not visible.
 */
146 static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
148 struct p9_flock flock;
152 unsigned char fl_type;
154 fid = filp->private_data;
/* Only POSIX (fcntl-style) locks are handled here. */
157 if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
/* Take the lock locally first; server is only told on local success. */
160 res = posix_lock_file_wait(filp, fl);
164 /* convert posix lock to p9 tlock args */
165 memset(&flock, 0, sizeof(flock));
166 /* map the lock type */
167 switch (fl->fl_type) {
169 flock.type = P9_LOCK_TYPE_RDLCK;
172 flock.type = P9_LOCK_TYPE_WRLCK;
175 flock.type = P9_LOCK_TYPE_UNLCK;
178 flock.start = fl->fl_start;
/* OFFSET_MAX end means "to EOF": encoded as length 0 on the wire. */
179 if (fl->fl_end == OFFSET_MAX)
182 flock.length = fl->fl_end - fl->fl_start + 1;
183 flock.proc_id = fl->fl_pid;
184 flock.client_id = fid->clnt->name;
186 flock.flags = P9_LOCK_FLAGS_BLOCK;
189 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
190 * for lock request, keep on trying
193 res = p9_client_lock_dotl(fid, &flock, &status);
197 if (status != P9_LOCK_BLOCKED)
/* Non-blocking request and server says blocked: give up (EAGAIN path). */
199 if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
/* Sleep between retries; an interrupting signal aborts the loop. */
201 if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
205 /* map 9p status to VFS status */
207 case P9_LOCK_SUCCESS:
210 case P9_LOCK_BLOCKED:
222 * incase server returned error for lock request, revert
/* Undo the local lock so local and server state stay in sync. */
225 if (res < 0 && fl->fl_type != F_UNLCK) {
226 fl_type = fl->fl_type;
227 fl->fl_type = F_UNLCK;
228 res = posix_lock_file_wait(filp, fl);
229 fl->fl_type = fl_type;
/*
 * F_GETLK: check for a conflicting lock, first locally, then at the server
 * via a 9P2000.L Tgetlock.  Fills *fl with the conflicting lock, if any.
 * Sampled excerpt: some declarations and returns are not visible here.
 */
235 static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
237 struct p9_getlock glock;
241 fid = filp->private_data;
244 posix_test_lock(filp, fl);
246 * if we have a conflicting lock locally, no need to validate
/* Local conflict found: fl already describes it, done. */
249 if (fl->fl_type != F_UNLCK)
252 /* convert posix lock to p9 tgetlock args */
253 memset(&glock, 0, sizeof(glock));
254 glock.type = P9_LOCK_TYPE_UNLCK;
255 glock.start = fl->fl_start;
/* OFFSET_MAX end means "to EOF": encoded as length 0 on the wire. */
256 if (fl->fl_end == OFFSET_MAX)
259 glock.length = fl->fl_end - fl->fl_start + 1;
260 glock.proc_id = fl->fl_pid;
261 glock.client_id = fid->clnt->name;
263 res = p9_client_getlock_dotl(fid, &glock);
266 /* map 9p lock type to os lock type */
267 switch (glock.type) {
268 case P9_LOCK_TYPE_RDLCK:
269 fl->fl_type = F_RDLCK;
271 case P9_LOCK_TYPE_WRLCK:
272 fl->fl_type = F_WRLCK;
274 case P9_LOCK_TYPE_UNLCK:
275 fl->fl_type = F_UNLCK;
/* Server reported a conflict: copy its range/pid back into fl. */
278 if (glock.type != P9_LOCK_TYPE_UNLCK) {
279 fl->fl_start = glock.start;
280 if (glock.length == 0)
281 fl->fl_end = OFFSET_MAX;
283 fl->fl_end = glock.start + glock.length - 1;
284 fl->fl_pid = glock.proc_id;
290 * v9fs_file_lock_dotl - lock a file (or directory)
291 * @filp: file to be locked
293 * @fl: file lock structure
/*
 * 9P2000.L lock op: dispatches SETLK/SETLKW to v9fs_file_do_lock() and
 * GETLK to v9fs_file_getlock().  Sampled excerpt: the declaration of ret
 * and the final return are not visible here.
 */
297 static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
299 struct inode *inode = file_inode(filp);
302 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
303 filp, cmd, fl, filp->f_path.dentry->d_name.name);
305 /* No mandatory locks */
306 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
/* Before taking a lock, push dirty pages out and drop cached pages. */
309 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
310 filemap_write_and_wait(inode->i_mapping);
311 invalidate_mapping_pages(&inode->i_data, 0, -1);
314 if (IS_SETLK(cmd) || IS_SETLKW(cmd))
315 ret = v9fs_file_do_lock(filp, cmd, fl);
316 else if (IS_GETLK(cmd))
317 ret = v9fs_file_getlock(filp, fl);
325 * v9fs_file_flock_dotl - lock a file
326 * @filp: file to be locked
328 * @fl: file lock structure
/*
 * 9P2000.L flock op: converts a BSD flock() request into a whole-file
 * POSIX lock and hands it to v9fs_file_do_lock().  Sampled excerpt: the
 * declaration of ret, fl_start assignment and final return are not visible.
 */
332 static int v9fs_file_flock_dotl(struct file *filp, int cmd,
333 struct file_lock *fl)
335 struct inode *inode = file_inode(filp);
338 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
339 filp, cmd, fl, filp->f_path.dentry->d_name.name);
341 /* No mandatory locks */
342 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
/* Only FL_FLOCK requests belong here (-EINVAL path not visible). */
345 if (!(fl->fl_flags & FL_FLOCK))
/* Before taking a lock, push dirty pages out and drop cached pages. */
348 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
349 filemap_write_and_wait(inode->i_mapping);
350 invalidate_mapping_pages(&inode->i_data, 0, -1);
352 /* Convert flock to posix lock */
353 fl->fl_owner = (fl_owner_t)filp;
355 fl->fl_end = OFFSET_MAX;
356 fl->fl_flags |= FL_POSIX;
357 fl->fl_flags ^= FL_FLOCK;
/*
 * Use logical || (was bitwise |): matches the identical test above and
 * is the intended boolean short-circuit idiom.  IS_SETLK/IS_SETLKW
 * yield 0/1 here so the result is unchanged, but || is correct style.
 */
359 if (IS_SETLK(cmd) || IS_SETLKW(cmd))
360 ret = v9fs_file_do_lock(filp, cmd, fl);
368 * v9fs_fid_readn - read from a fid
370 * @data: data buffer to read data into
371 * @udata: user data buffer to read data into
372 * @count: size of buffer
373 * @offset: offset at which to read data
/*
 * Loop p9_client_read() until count bytes are read, a short read occurs,
 * or an error is returned.  Exactly one of data (kernel buffer) or udata
 * (user buffer) is used.  Sampled excerpt: the accumulation of n into a
 * total and the return are not visible here.
 */
377 v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
382 p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
383 fid->fid, (long long unsigned)offset, count);
/* Per-request cap: the fid's iounit, or the client msize minus header. */
386 size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
388 n = p9_client_read(fid, data, udata, offset, count);
/* A short read (n < size) means EOF or error; stop looping. */
400 } while (count > 0 && n == size);
409 * v9fs_file_readn - read from a file
410 * @filp: file pointer to read
411 * @data: data buffer to read data into
412 * @udata: user data buffer to read data into
413 * @count: size of buffer
414 * @offset: offset at which to read data
/* Thin wrapper: read via the fid stashed in filp->private_data. */
418 v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
421 return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
425 * v9fs_file_read - read from a file
426 * @filp: file pointer to read
427 * @udata: user data buffer to read data into
428 * @count: size of buffer
429 * @offset: offset at which to read data
/*
 * read() entry point for non-cached mode.  Chooses the multi-request path
 * (v9fs_file_readn) when count exceeds one wire message, else a single
 * p9_client_read.  Sampled excerpt: the size comparison branch and the
 * offset update/return are not visible here.
 */
434 v9fs_file_read(struct file *filp, char __user *udata, size_t count,
441 p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
442 fid = filp->private_data;
/* Per-request cap: the fid's iounit, or the client msize minus header. */
444 size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
446 ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
448 ret = p9_client_read(fid, NULL, udata, *offset, count);
/*
 * Write count bytes from user buffer data to fid at *offset, looping over
 * p9_client_write().  If invalidate is set, drop the written page range
 * from the page cache so later cached reads refetch, then grow i_size if
 * the write extended the file.  Sampled excerpt: the write loop structure
 * and the total/return handling are not fully visible here.
 */
457 v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
458 const char __user *data, size_t count,
459 loff_t *offset, int invalidate)
464 loff_t origin = *offset;
465 unsigned long pg_start, pg_end;
467 p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
468 data, (int)count, (int)*offset);
471 n = p9_client_write(fid, NULL, data+total, origin+total, count);
/* Drop cached pages overlapping the range we just wrote. */
478 if (invalidate && (total > 0)) {
479 pg_start = origin >> PAGE_CACHE_SHIFT;
480 pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
481 if (inode->i_mapping && inode->i_mapping->nrpages)
482 invalidate_inode_pages2_range(inode->i_mapping,
/* Extend i_size if the write went past the old end of file. */
485 i_size = i_size_read(inode);
486 if (*offset > i_size) {
487 inode_add_bytes(inode, *offset - i_size);
488 i_size_write(inode, *offset);
498 * v9fs_file_write - write to a file
499 * @filp: file pointer to write
500 * @data: data buffer to write data from
501 * @count: size of buffer
502 * @offset: offset at which to write data
/*
 * write() entry point for non-cached mode: run the generic VFS write
 * checks (which may clamp count / apply O_APPEND), then delegate to
 * v9fs_file_write_internal with invalidate=1.  Sampled excerpt: the
 * zero-count early exit and offset update are not visible here.
 */
506 v9fs_file_write(struct file *filp, const char __user * data,
507 size_t count, loff_t *offset)
510 loff_t origin = *offset;
513 retval = generic_write_checks(filp, &origin, &count, 0);
/* Guard against a count that overflows ssize_t. */
518 if ((ssize_t) count < 0)
524 retval = v9fs_file_write_internal(file_inode(filp),
526 data, count, &origin, 1);
527 /* update offset on successful write */
/*
 * fsync for legacy 9P: flush dirty pages in [start, end], then issue a
 * blank Twstat, which servers treat as a sync request.  datasync is
 * logged but otherwise appears unused in the visible lines.
 */
535 static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
539 struct inode *inode = filp->f_mapping->host;
540 struct p9_wstat wstat;
543 retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
/* i_mutex serializes the wstat against concurrent metadata updates. */
547 mutex_lock(&inode->i_mutex);
548 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
550 fid = filp->private_data;
/* An all-"don't touch" wstat is the 9P idiom for "sync this file". */
551 v9fs_blank_wstat(&wstat);
553 retval = p9_client_wstat(fid, &wstat);
554 mutex_unlock(&inode->i_mutex);
/*
 * fsync for 9P2000.L: flush dirty pages in [start, end], then use the
 * protocol's native Tfsync, passing datasync through to the server.
 */
559 int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
563 struct inode *inode = filp->f_mapping->host;
566 retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
570 mutex_lock(&inode->i_mutex);
571 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
573 fid = filp->private_data;
575 retval = p9_client_fsync(fid, datasync);
576 mutex_unlock(&inode->i_mutex);
/*
 * mmap: generic setup, then install v9fs's vm_ops so page_mkwrite goes
 * through the writeback fid.  Sampled excerpt: the success check guarding
 * the vm_ops assignment is not visible here.
 */
582 v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
586 retval = generic_file_mmap(file, vma);
588 vma->vm_ops = &v9fs_file_vm_ops;
/*
 * page_mkwrite: make a mmap'd page writable.  Relies on the writeback_fid
 * created at open time (BUG_ON if absent).  Returns VM_FAULT_LOCKED with
 * the page locked, or VM_FAULT_NOPAGE if the page was truncated/remapped.
 * Sampled excerpt: the lock_page call and some labels are not visible.
 */
594 v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
596 struct v9fs_inode *v9inode;
597 struct page *page = vmf->page;
598 struct file *filp = vma->vm_file;
599 struct inode *inode = file_inode(filp);
602 p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
603 page, (unsigned long)filp->private_data);
605 /* Update file times before taking page lock */
606 file_update_time(filp);
608 v9inode = V9FS_I(inode);
609 /* make sure the cache has finished storing the page */
610 v9fs_fscache_wait_on_page_write(inode, page);
/* writeback_fid is set up at open for any writable open; must exist. */
611 BUG_ON(!v9inode->writeback_fid);
/* Page no longer belongs to this file (truncate race): retry the fault. */
613 if (page->mapping != inode->i_mapping)
615 wait_for_stable_page(page);
617 return VM_FAULT_LOCKED;
620 return VM_FAULT_NOPAGE;
/*
 * O_DIRECT read in cached mode: flush any dirty cached pages overlapping
 * the request so the direct read sees current data, then do the
 * non-cached read.  Sampled excerpt: the range-end computation for the
 * flush is not visible here.
 */
624 v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
629 struct address_space *mapping;
632 mapping = filp->f_mapping;
633 inode = mapping->host;
636 size = i_size_read(inode);
638 filemap_write_and_wait_range(mapping, offset,
641 return v9fs_file_read(filp, udata, count, offsetp);
645 * v9fs_cached_file_read - read from a file
646 * @filp: file pointer to read
647 * @udata: user data buffer to read data into
648 * @count: size of buffer
649 * @offset: offset at which to read data
/*
 * read() entry point for cached mode: bypass the page cache for O_DIRECT
 * opens, otherwise go through the generic sync-read path.
 */
653 v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
656 if (filp->f_flags & O_DIRECT)
657 return v9fs_direct_read(filp, data, count, offset);
658 return do_sync_read(filp, data, count, offset);
/*
 * O_DIRECT write in cached mode: under i_mutex, flush then invalidate the
 * cached pages covering the target range, and perform the non-cached
 * write.  If invalidation hits a busy page, fall back to a buffered
 * write.  Sampled excerpt: zero-count early exit, flush range end, and
 * some labels/returns are not visible here.
 */
662 v9fs_direct_write(struct file *filp, const char __user * data,
663 size_t count, loff_t *offsetp)
668 struct address_space *mapping;
671 mapping = filp->f_mapping;
672 inode = mapping->host;
676 mutex_lock(&inode->i_mutex);
677 retval = filemap_write_and_wait_range(mapping, offset,
682 * After a write we want buffered reads to be sure to go to disk to get
683 * the new data. We invalidate clean cached page from the region we're
684 * about to write. We do this *before* the write so that if we fail
685 * here we fall back to buffered write
687 if (mapping->nrpages) {
688 pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
689 pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
691 retval = invalidate_inode_pages2_range(mapping,
694 * If a page can not be invalidated, fall back
/* -EBUSY from invalidation: take the buffered-write fallback below. */
698 if (retval == -EBUSY)
703 retval = v9fs_file_write(filp, data, count, offsetp);
705 mutex_unlock(&inode->i_mutex);
/* Fallback: release i_mutex and write through the page cache instead. */
709 mutex_unlock(&inode->i_mutex);
710 return do_sync_write(filp, data, count, offsetp);
714 * v9fs_cached_file_write - write to a file
715 * @filp: file pointer to write
716 * @data: data buffer to write data from
717 * @count: size of buffer
718 * @offset: offset at which to write data
/*
 * write() entry point for cached mode: bypass the page cache for O_DIRECT
 * opens, otherwise go through the generic sync-write path.
 */
722 v9fs_cached_file_write(struct file *filp, const char __user * data,
723 size_t count, loff_t *offset)
726 if (filp->f_flags & O_DIRECT)
727 return v9fs_direct_write(filp, data, count, offset);
728 return do_sync_write(filp, data, count, offset);
/* VM ops for mmap'd files: generic fault; custom mkwrite for writeback fid. */
731 static const struct vm_operations_struct v9fs_file_vm_ops = {
732 .fault = filemap_fault,
733 .page_mkwrite = v9fs_vm_page_mkwrite,
734 .remap_pages = generic_file_remap_pages,
/* File ops: cached mode, legacy 9P (local-only locks, wstat-based fsync). */
738 const struct file_operations v9fs_cached_file_operations = {
739 .llseek = generic_file_llseek,
740 .read = v9fs_cached_file_read,
741 .write = v9fs_cached_file_write,
742 .aio_read = generic_file_aio_read,
743 .aio_write = generic_file_aio_write,
744 .open = v9fs_file_open,
745 .release = v9fs_dir_release,
746 .lock = v9fs_file_lock,
747 .mmap = v9fs_file_mmap,
748 .fsync = v9fs_file_fsync,
/* File ops: cached mode, 9P2000.L (server-side locks/flock, Tfsync). */
751 const struct file_operations v9fs_cached_file_operations_dotl = {
752 .llseek = generic_file_llseek,
753 .read = v9fs_cached_file_read,
754 .write = v9fs_cached_file_write,
755 .aio_read = generic_file_aio_read,
756 .aio_write = generic_file_aio_write,
757 .open = v9fs_file_open,
758 .release = v9fs_dir_release,
759 .lock = v9fs_file_lock_dotl,
760 .flock = v9fs_file_flock_dotl,
761 .mmap = v9fs_file_mmap,
762 .fsync = v9fs_file_fsync_dotl,
/* File ops: non-cached mode, legacy 9P (direct reads/writes, ro mmap). */
765 const struct file_operations v9fs_file_operations = {
766 .llseek = generic_file_llseek,
767 .read = v9fs_file_read,
768 .write = v9fs_file_write,
769 .open = v9fs_file_open,
770 .release = v9fs_dir_release,
771 .lock = v9fs_file_lock,
772 .mmap = generic_file_readonly_mmap,
773 .fsync = v9fs_file_fsync,
/* File ops: non-cached mode, 9P2000.L (direct I/O, server locks, ro mmap). */
776 const struct file_operations v9fs_file_operations_dotl = {
777 .llseek = generic_file_llseek,
778 .read = v9fs_file_read,
779 .write = v9fs_file_write,
780 .open = v9fs_file_open,
781 .release = v9fs_dir_release,
782 .lock = v9fs_file_lock_dotl,
783 .flock = v9fs_file_flock_dotl,
784 .mmap = generic_file_readonly_mmap,
785 .fsync = v9fs_file_fsync_dotl,