/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *	not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *	requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
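/*
 * Overview (rough sketch, not authoritative; the exact flow depends on
 * mount options such as wsize and on who initiates the writeback):
 *
 *	nfs_updatepage()
 *	   -> nfs_writepage_setup() -> nfs_update_request()	queue or merge
 *	nfs_writepage() / nfs_writepages()
 *	   -> nfs_flush_mapping() -> nfs_flush_list()
 *	      -> nfs_flush_one() or nfs_flush_multi()		send WRITE RPCs
 *	nfs_writeback_done_full() / nfs_writeback_done_partial()
 *	      unstable replies park requests on the commit list
 *	nfs_commit_inode() -> nfs_commit_list() -> nfs_commit_done()
 */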
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
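/*
 * Usage sketch (hypothetical caller; compare nfs_flush_one() below).
 * Write data buffers come from the mempool above and must go back through
 * the RCU-deferred release path, never plain kfree():
 *
 *	struct nfs_write_data *data = nfs_writedata_alloc(count);
 *	if (data == NULL)
 *		return -ENOMEM;
 *	...
 *	nfs_writedata_release(data);
 *
 * The release path frees any kcalloc()ed pagevec and returns the structure
 * to nfs_wdata_mempool only after an RCU bh grace period, since the
 * embedded RPC task may still be visible to RCU readers.
 */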
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			atomic_inc(&req->wb_count);
	}
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct nfs_page *req = NULL;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;

	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(req_lock);
	return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset + count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	if (count != PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
	SetPageUptodate(page);
}
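/*
 * Example: a 300-byte write at base 0 into the final, 300-byte-long page
 * of a file gives count == nfs_page_length(page) == 300, so the remaining
 * PAGE_CACHE_SIZE - 300 bytes are zeroed and the page can be marked
 * uptodate without first reading it back from the server.
 */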
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	unsigned offset;
	int err = 0;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	req = nfs_page_find_request(page);
	if (req != NULL) {
		int flushme = test_bit(PG_NEED_FLUSH, &req->wb_flags);
		nfs_release_request(req);
		if (!flushme)
			goto out;
		/* Ensure we've flushed out the invalid write */
		nfs_wb_page_priority(inode, page, wb_priority(wbc));
	}

	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
out:
	if (!wbc->for_writepages)
		nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
	unlock_page(page);
	return err;
}
/*
 * Note: causes nfs_update_request() to block on the assumption
 *	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0)
		err = 0;
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	congestion_end(WRITE);
	return err;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
	mark_inode_dirty(inode);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	mark_inode_dirty(inode);
}
#endif
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}

static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	spin_lock(&nfsi->req_lock);
	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif
static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page	*req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	do {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;

			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	} while (1);

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
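/*
 * Merging example: an existing dirty request covering bytes 100-199 of
 * the page (wb_offset = 100, wb_bytes = 100), updated with offset = 150
 * and bytes = 200, gives rqend = 200 and end = 350; the merged request
 * then covers bytes 100-349 (wb_bytes = 250). A request from another
 * open context, or one that does not touch [offset, end), yields -EBUSY
 * instead, and the caller must flush the page before retrying.
 */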
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| test_bit(PG_NEED_FLUSH, &req->wb_flags);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);
out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	set_page_writeback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page *req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}
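/*
 * Example: with PAGE_CACHE_SIZE == 4096, a server wsize of 4096 or more
 * lets nfs_flush_one() cover each page with a single WRITE, while a
 * wsize of 1024 makes nfs_flush_list() fall back to nfs_flush_multi(),
 * which splits one dirty page into four 1024-byte WRITE calls.
 */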
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
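/*
 * Short-write example: if argp->count was 8192 but the server replied
 * with resp->count == 4096 for a stable write, the arguments above are
 * advanced by 4096 bytes and rpc_restart_call() resends the remainder.
 * An unstable short write is instead resent in full as NFS_FILE_SYNC,
 * so a server crash cannot lose the unacknowledged part.
 */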
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
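/*
 * Verifier example: each unstable WRITE reply carries the server's boot
 * verifier, saved in req->wb_verf. If the server reboots before the
 * COMMIT, the verifier in the COMMIT reply no longer matches, the
 * memcmp() above fails, and the pages are marked dirty again so the
 * data is rewritten rather than silently lost.
 */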
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct nfs_inode *nfsi = NFS_I(mapping->host);
	LIST_HEAD(head);
	long res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(mapping, wbc, &head);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(mapping->host, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	unsigned long idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			unsigned long l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(unsigned long)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		wbc->pages_skipped = 0;
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(mapping, wbc, &head);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE) {
				nfs_cancel_dirty_list(&head);
				ret = pages;
			} else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (wbc->pages_skipped != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0) {
			if (wbc->pages_skipped != 0)
				continue;
			break;
		}
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
/*
 * Flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_cyclic = 1,
	};
	int ret;

	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
	if (ret >= 0)
		return 0;
	return ret;
}

int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
	if (ret >= 0)
		return 0;
	return ret;
}

static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);

	return nfs_sync_mapping_range(inode->i_mapping, range_start, range_end, how | FLUSH_STABLE);
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, 0);
}
int nfs_set_page_dirty(struct page *page)
{
	struct nfs_page *req;

	req = nfs_page_find_request(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
		nfs_release_request(req);
	}
	return __set_page_dirty_nobuffers(page);
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}