/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

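/*
 * Requests spanning only a few pages use the page array embedded in
 * struct nfs_page_array; larger requests fall back to a kcalloc()'d
 * vector, so the common small-I/O case costs no extra allocation.
 */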
static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

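/*
 * Worked example: if io_start is 4096 and good_bytes is 8192, an error
 * reported at pos 8192 truncates good_bytes to 8192 - 4096 = 4096, so
 * only the bytes before the first failure count as transferred. A later
 * error at a higher offset leaves good_bytes untouched.
 */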
static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);

	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&c->flags);
	} while (atomic_read(&c->io_count) != 0);
	finish_wait(wq, &q.wait);
	return ret;
}

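/*
 * Note on ordering in __nfs_iocounter_wait(): NFS_IO_INPROGRESS is set
 * before io_count is re-read, and nfs_iocounter_dec() clears the bit
 * and wakes the waitqueue only after dropping io_count to zero, so a
 * decrement racing with the sleeper cannot be missed.
 */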
/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	if (atomic_read(&c->io_count) == 0)
		return 0;
	return __nfs_iocounter_wait(c);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page = page;
	req->wb_index = page_file_index(page);
	page_cache_get(page);
	req->wb_offset = offset;
	req->wb_pgbase = offset;
	req->wb_bytes = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}

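/*
 * A minimal caller sketch (illustrative only, not code from this file):
 * with the page already locked, a writeback path would do roughly
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	nfs_unlock_and_release_request(req);
 */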
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

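/*
 * The barrier pair around clear_bit() above orders the request's final
 * stores against the PG_BUSY clear, and the clear against the wakeup,
 * matching the wait_on_bit() sleeper in nfs_wait_on_request() below.
 */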
/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			   nfs_wait_bit_uninterruptible,
			   TASK_UNINTERRUPTIBLE);
}

bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return false;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

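/*
 * Size-check example: with pg_bsize of 65536 and 61440 bytes already
 * queued (pg_count), a 4096-byte request still fits (61440 + 4096 <=
 * 65536) and is coalesced; an 8192-byte request would exceed the block
 * size, so the descriptor's current list is flushed first.
 */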
static inline struct nfs_rw_header *NFS_RW_HEADER(struct nfs_pgio_header *hdr)
{
	return container_of(hdr, struct nfs_rw_header, header);
}

/**
 * nfs_rw_header_alloc - Allocate a header for a read or write
 * @ops: Read or write function vector
 */
struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_rw_header *header = ops->rw_alloc_header();

	if (header) {
		struct nfs_pgio_header *hdr = &header->header;

		INIT_LIST_HEAD(&hdr->pages);
		INIT_LIST_HEAD(&hdr->rpc_list);
		spin_lock_init(&hdr->lock);
		atomic_set(&hdr->refcnt, 0);
		hdr->rw_ops = ops;
	}
	return header;
}
EXPORT_SYMBOL_GPL(nfs_rw_header_alloc);

/**
 * nfs_rw_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_rw_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(NFS_RW_HEADER(hdr));
}
EXPORT_SYMBOL_GPL(nfs_rw_header_free);

/**
 * nfs_pgio_data_alloc - Allocate pageio data
 * @hdr: The header making a request
 * @pagecount: Number of pages to create
 */
struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
					  unsigned int pagecount)
{
	struct nfs_pgio_data *data, *prealloc;

	prealloc = &NFS_RW_HEADER(hdr)->rpc_data;
	if (prealloc->header == NULL)
		data = prealloc;
	else
		data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	if (nfs_pgarray_set(&data->pages, pagecount)) {
		data->header = hdr;
		atomic_inc(&hdr->refcnt);
	} else {
		if (data != prealloc)
			kfree(data);
		data = NULL;
	}
out:
	return data;
}

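/*
 * Each nfs_rw_header embeds one preallocated nfs_pgio_data (rpc_data),
 * handed out while its ->header is still NULL; only additional,
 * concurrent requests against the same header pay for a kzalloc(). The
 * release path below mirrors this by resetting rpc_data rather than
 * freeing it.
 */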
/**
 * nfs_pgio_data_release - Properly free pageio data
 * @data: The data to release
 */
void nfs_pgio_data_release(struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;
	struct nfs_rw_header *pageio_header = NFS_RW_HEADER(hdr);

	put_nfs_open_context(data->args.context);
	if (data->pages.pagevec != data->pages.page_array)
		kfree(data->pages.pagevec);
	if (data == &pageio_header->rpc_data) {
		data->header = NULL;
		data = NULL;
	} else
		kfree(data);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	/* Note: we only free the rpc_task after callbacks are done.
	 * See the comment in rpc_free_task() for why
	 */
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_release);

/**
 * nfs_pgio_prepare - Prepare pageio data to go over the wire
 * @task: The current task
 * @calldata: pageio data to prepare
 */
void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	int err;

	err = NFS_PROTO(data->header->inode)->pgio_rpc_prepare(task, data);
	if (err)
		rpc_exit(task, err);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio data to release
 */
void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_data *data = calldata;

	if (data->header->rw_ops->rw_release)
		data->header->rw_ops->rw_release(data);
	nfs_pgio_data_release(data);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

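/*
 * Typical driver loop (a sketch under assumptions, not code from this
 * file; next_request() stands in for the caller's own iteration, and
 * the read-side ops names are illustrative):
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, &nfs_pageio_read_ops,
 *			&nfs_async_read_completion_ops, &nfs_rw_read_ops,
 *			NFS_SERVER(inode)->rsize, 0);
 *	while ((req = next_request()) != NULL)
 *		nfs_pageio_add_request(&pgio, req);
 *	nfs_pageio_complete(&pgio);
 */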
static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
				   const struct nfs_open_context *ctx2)
{
	return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (!nfs_match_open_context(req->wb_context, prev->wb_context))
		return false;
	if (req->wb_context->dentry->d_inode->i_flock != NULL &&
	    !nfs_match_lock_context(req->wb_lock_context, prev->wb_lock_context))
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

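/*
 * In other words: 'prev' must run to the end of its page, 'req' must
 * start at the beginning of its page, and the two byte ranges must be
 * back-to-back in the file; the lock-context comparison only applies
 * when the inode actually has file locks (i_flock != NULL). The final
 * pg_test() call lets the I/O path veto the merge.
 */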
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

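/*
 * Recoalescing drains the descriptor's page list onto a local list and
 * feeds each request back through __nfs_pageio_add_request(), so the
 * requests are re-merged under the descriptor's current I/O parameters
 * (which may have changed since they were first added); the outer loop
 * repeats if the re-add raises pg_recoalesce again.
 */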
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}