/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;

static struct workqueue_struct *xfslogd_workqueue;
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif
#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
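/*
 * Editorial example, not part of the original source: these two translations
 * are what tie buffer flags to memory allocation behaviour.  A speculative
 * read-ahead buffer built with XBF_READ_AHEAD | XBF_DONT_BLOCK would see
 *
 *	gfp_t gfp = xb_to_gfp(XBF_READ_AHEAD | XBF_DONT_BLOCK);
 *			// __GFP_NORETRY | __GFP_NOWARN - fail fast, quietly
 *	unsigned int km = xb_to_km(XBF_READ_AHEAD | XBF_DONT_BLOCK);
 *			// KM_NOFS - no filesystem recursion from reclaim
 *
 * while an ordinary blocking lookup maps to GFP_NOFS/KM_NOFS or
 * GFP_KERNEL/KM_SLEEP depending on whether XBF_DONT_BLOCK is set.
 */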
/*
 * Return true if the buffer is vmapped.
 *
 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
 * code is clever enough to know it doesn't have to map a single page,
 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;

	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
	spin_unlock(&btp->bt_lru_lock);
/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are not
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is there
 * to optimise the shrinker removing the buffer from the LRU and calling
 * xfs_buf_free(), i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
	spin_unlock(&btp->bt_lru_lock);
/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			atomic_dec(&bp->b_hold);
		spin_unlock(&btp->bt_lru_lock);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
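/*
 * Editorial sketch, not part of the original source: how staling interacts
 * with the reference counts.  A caller that invalidates a buffer typically
 * ends up doing something like
 *
 *	xfs_buf_stale(bp);	// b_lru_ref forced to 0, buffer pulled off LRU
 *	xfs_buf_rele(bp);	// last hold -> buffer is freed immediately
 *
 * whereas releasing a non-stale buffer with a non-zero b_lru_ref parks it on
 * the LRU (see xfs_buf_rele() below) instead of freeing it.
 */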
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_buf_flags_t		flags)

	bp = kmem_zone_zalloc(xfs_buf_zone, xb_to_km(flags));

	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	bp->b_target = target;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	/*
	 * We do not set the block number here in the buffer because we have not
	 * finished initialising the buffer. We insert the buffer into the cache
	 * in this state, so this ensures that we are unable to do IO on a
	 * buffer that hasn't been fully initialised.
	 */
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);
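/*
 * Editorial sketch, not part of the original source: xfs_buf_alloc() only
 * builds the buffer header.  The normal cached-buffer path pairs it with
 * memory allocation, cache insertion and mapping, roughly as
 *
 *	new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
 *	error = xfs_buf_allocate_memory(new_bp, flags);	// heap or page array
 *	bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
 *	if (!(bp->b_flags & XBF_MAPPED))
 *		error = _xfs_buf_map_pages(bp, flags);	// vm_map_ram if needed
 *
 * (error handling omitted - see xfs_buf_get() below for the real sequence).
 */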
/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
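	/*
	 * Worked example (editorial, not in the original source), assuming 4k
	 * pages and the usual XB_PAGES value of 32: a 16k directory block
	 * needs page_count == 4 and uses the inline b_page_array, and only
	 * buffers larger than 128k fall back to a kmem_alloc()ed pointer
	 * array.
	 */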
/*
 * Frees b_pages if it was allocated.
 */
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
				     bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	kmem_zone_free(xfs_buf_zone, bp);
/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
xfs_buf_allocate_memory(
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;

	/*
	 * For buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_length < BTOBB(PAGE_SIZE)) {
		bp->b_addr = kmem_alloc(BBTOB(bp->b_length), xb_to_km(flags));
			/* low memory - use alloc_page loop instead */

		if (((unsigned long)(bp->b_addr + BBTOB(bp->b_length) - 1) &
								PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);

		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
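	/*
	 * Worked example (editorial, not in the original source): the "spans
	 * two pages" test above compares the page frame of the first and the
	 * last byte of the heap allocation.  With PAGE_SIZE == 4096, a 512
	 * byte allocation that happens to start at offset 0xf00 within a page
	 * has its last byte at offset 0x10ff, i.e. in the next page, so the
	 * memory is freed again and the buffer falls back to per-page
	 * allocation.
	 */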
	end = BBTOB(bp->b_bn + bp->b_length);
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(BBTOB(bp->b_bn));
	error = _xfs_buf_get_pages(bp, page_count, flags);

	offset = bp->b_offset;
	size = BBTOB(bp->b_length);
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);

		bp->b_pages[i] = page;

	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
/*
 * Map buffer into kernel address-space if necessary.
 */
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
		} while (retried++ <= 1);

		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for a given range of an
 * inode. The buffer is returned locked. No I/O is implied by this call.
 */
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	xfs_buf_flags_t		flags,
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;

	numbytes = BBTOB(numblks);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
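	/*
	 * Worked example (editorial, not in the original source): on a 4096
	 * byte sector device bt_sshift is 12 and bt_smask is 0xfff, so an
	 * 8192 byte buffer at daddr 16 passes both checks (8192 >= 4096, and
	 * 16 * 512 = 8192 is sector aligned), while a 512 byte request, or
	 * any daddr that is not a multiple of 8, would trip these asserts.
	 */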
	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;

		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * Found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
			atomic_inc(&bp->b_hold);

	rb_link_node(&new_bp->b_rbnode, parent, rbp);
	rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
	/* the buffer keeps the perag reference until it is freed */
	spin_unlock(&pag->pag_buf_lock);

	XFS_STATS_INC(xb_miss_locked);
	spin_unlock(&pag->pag_buf_lock);

	spin_unlock(&pag->pag_buf_lock);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			XFS_STATS_INC(xb_busy_locked);

		XFS_STATS_INC(xb_get_locked_waited);

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
xfs_buf_get(
	xfs_buftarg_t		*target,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*new_bp;

	bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);

	new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		kmem_zone_free(xfs_buf_zone, new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

	/*
	 * Now we have a workable buffer, fill in the block number so
	 * that we can do IO on it.
	 */
	bp->b_io_length = bp->b_length;

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);

	if (flags & (XBF_LOCK | XBF_TRYLOCK))
_xfs_buf_read(
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	xfs_buf_iorequest(bp);
	if (flags & XBF_ASYNC)
		return 0;
	return xfs_buf_iowait(bp);
}

xfs_buf_read(
	xfs_buftarg_t		*target,
	xfs_buf_flags_t		flags)
{
	bp = xfs_buf_get(target, blkno, numblks, flags);

	trace_xfs_buf_read(bp, flags, _RET_IP_);

	if (!XFS_BUF_ISDONE(bp)) {
		XFS_STATS_INC(xb_get_read);
		_xfs_buf_read(bp, flags);
	} else if (flags & XBF_ASYNC) {
		/*
		 * Read ahead call which is already satisfied,
		 * drop the buffer.
		 */
	} else {
		/* We do not want read in the flags */
		bp->b_flags &= ~XBF_READ;
	}

	if (flags & (XBF_LOCK | XBF_TRYLOCK))
/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read(target, blkno, numblks,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
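/*
 * Editorial sketch, not part of the original source: a caller that knows it
 * will need a block soon can kick off a non-blocking, non-waiting read
 *
 *	xfs_buf_readahead(mp->m_ddev_targp, blkno, numblks);
 *
 * and later obtain the (hopefully already cached) contents with a normal
 * blocking read
 *
 *	bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0);
 *
 * The readahead variant backs off entirely when the backing device is
 * congested, so it must never be relied upon for correctness.
 */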
/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	bp = xfs_buf_get_uncached(target, numblks, flags);

	/* set up the buffer for a read IO */
	XFS_BUF_SET_ADDR(bp, daddr);

	xfsbdstrat(target->bt_mount, bp);
	error = xfs_buf_iowait(bp);

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
	_xfs_buf_free_pages(bp);

	bp->b_page_count = 0;
	bp->b_length = numblks;
	bp->b_io_length = numblks;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_flags &= ~XBF_MAPPED;
static inline struct page *
mem_to_page(
	if (!is_vmalloc_addr(addr))
		return virt_to_page(addr);
	return vmalloc_to_page(addr);
xfs_buf_associate_memory(
	unsigned long		pageaddr;
	unsigned long		offset;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	_xfs_buf_free_pages(bp);

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);
	bp->b_flags |= XBF_MAPPED;
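/*
 * Worked example (editorial, not in the original source): associating 2048
 * bytes of externally provided memory that starts 512 bytes into a 4k page
 * gives
 *
 *	offset      = 512
 *	buflen      = PAGE_ALIGN(2048 + 512)  = 4096
 *	page_count  = 4096 >> PAGE_SHIFT      = 1
 *	b_io_length = BTOBB(2048) = 4 BBs,  b_length = BTOBB(4096) = 8 BBs
 *
 * so a single struct page pointer covers the whole region even though the
 * memory was not page aligned.
 */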
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	unsigned long		page_count;

	bp = xfs_buf_alloc(target, 0, numblks, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count, 0);

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));

	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);

	trace_xfs_buf_get_uncached(bp, _RET_IP_);

		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);

	kmem_zone_free(xfs_buf_zone, bp);
/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);

/*
 * Releases a hold on the specified buffer. If the hold count is 1,
 * calls xfs_buf_free.
 */
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
		    atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_trylock(bp, _RET_IP_);

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
	trace_xfs_buf_unlock(bp, _RET_IP_);

	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
/*
 * Buffer Utility Routines
 */
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);

	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}

	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
xfs_buf_ioerror_alert(
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	xfs_buf_ioend(bp, 0);

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer after we have prematurely
 * unpinned it to forcibly shut down the filesystem.
 */
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem. Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
xfsbdstrat(
	struct xfs_mount	*mp,
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	xfs_buf_ioerror(bp, -error);

	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	int			rw, map_i, total_nr_pages, nr_pages;
	int			offset = bp->b_offset;
	int			size = BBTOB(bp->b_io_length);
	sector_t		sector = bp->b_bn;

	total_nr_pages = bp->b_page_count;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;
	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		sector += BTOBB(nbytes);

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
	} else {
		xfs_buf_ioerror(bp, EIO);
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/*
	 * Set the count to 1 initially; this will stop an I/O completion
	 * callout which happens before we have started all the I/O from
	 * calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);
/*
 * Waits for I/O to complete on the buffer supplied. It returns immediately if
 * no I/O is pending or there is already a pending error on the buffer. It
 * returns the I/O error code, if any, or 0 if there was no error.
 */
	trace_xfs_buf_iowait(bp, _RET_IP_);

	if (!bp->b_error)
		wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);

	if (bp->b_flags & XBF_MAPPED)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
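/*
 * Worked example (editorial, not in the original source): for an unmapped
 * buffer with b_offset == 0 and 4k pages, asking for offset 5000 resolves to
 *
 *	page = bp->b_pages[5000 >> PAGE_SHIFT]      = bp->b_pages[1]
 *	addr = page_address(page) + (5000 & 4095)   = page + 904
 *
 * i.e. the returned pointer is only valid within that one page, which is why
 * xfs_buf_iomove() below walks the buffer page by page.
 */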
/*
 * Move data into or out of a buffer.
 */
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_SIZE - cpoff, BBTOB(bp->b_io_length) - boff);

		ASSERT(((csize + cpoff) <= PAGE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
	struct xfs_buftarg	*btp)
{
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);

		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);

		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	int			nr_to_scan = sc->nr_to_scan;

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

xfs_setsize_buftarg_flags(
	unsigned int		blocksize,
	unsigned int		sectorsize,
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
/*
 * When allocating the initial buffer target we have not yet read in the
 * superblock, so we don't know what size sectors are being used at this
 * early stage.  Play safe.
 */
xfs_setsize_buftarg_early(
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

xfs_setsize_buftarg(
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_dev =  bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;

	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
xfs_buf_delwri_queue(
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout. Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
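	/*
	 * Editorial usage sketch, not part of the original source: the delwri
	 * interfaces are built around a caller-owned list, e.g.
	 *
	 *	LIST_HEAD(buffer_list);
	 *
	 *	while ((bp = next_dirty_buffer()) != NULL)   // hypothetical
	 *		xfs_buf_delwri_queue(bp, &buffer_list);  // bp locked
	 *	error = xfs_buf_delwri_submit(&buffer_list); // sort, write, wait
	 *
	 * No lock protects the list itself; it is assumed to be local to the
	 * caller, as the comment above notes.
	 */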
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values.
 */
	struct list_head	*a,
	struct list_head	*b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_bn - bp->b_bn;
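	/*
	 * Editorial note, not part of the original source: b_bn is a 64 bit
	 * daddr, so the difference cannot simply be returned as the 32 bit
	 * list_sort() result - a diff of 0x100000000 would truncate to 0.
	 * The comparison is therefore collapsed to -1/0/1:
	 *
	 *	if (diff < 0)  return -1;
	 *	if (diff > 0)  return  1;
	 *	return 0;
	 */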
__xfs_buf_delwri_submit(
	struct list_head	*buffer_list,
	struct list_head	*io_list,
	struct blk_plug		plug;
	struct xfs_buf		*bp, *n;

	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
			if (xfs_buf_ispinned(bp)) {
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime. In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			continue;
		}

		list_move_tail(&bp->b_list, io_list);
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
	}

	list_sort(NULL, io_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, io_list, b_list) {
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
		bp->b_flags |= XBF_WRITE;

			bp->b_flags |= XBF_ASYNC;
			list_del_init(&bp->b_list);

	blk_finish_plug(&plug);
/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers. This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing, as the @buffer_list is consumed in this
 * function.
 */
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
}
/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	int			error = 0, error2;

	__xfs_buf_delwri_submit(buffer_list, &io_list, true);

	/* Wait for IO to complete. */
	while (!list_empty(&io_list)) {
		bp = list_first_entry(&io_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);
		error2 = xfs_buf_iowait(bp);
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);

xfs_buf_terminate(void)
{
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}