[PATCH] Remove readv/writev methods and use aio_read/aio_write instead
[firefly-linux-kernel-4.4.55.git] fs/xfs/linux-2.6/xfs_buf.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include <linux/stddef.h>
19 #include <linux/errno.h>
20 #include <linux/slab.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/vmalloc.h>
24 #include <linux/bio.h>
25 #include <linux/sysctl.h>
26 #include <linux/proc_fs.h>
27 #include <linux/workqueue.h>
28 #include <linux/percpu.h>
29 #include <linux/blkdev.h>
30 #include <linux/hash.h>
31 #include <linux/kthread.h>
32 #include <linux/migrate.h>
33 #include "xfs_linux.h"
34
35 STATIC kmem_zone_t *xfs_buf_zone;
36 STATIC kmem_shaker_t xfs_buf_shake;
37 STATIC int xfsbufd(void *);
38 STATIC int xfsbufd_wakeup(int, gfp_t);
39 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
40
41 STATIC struct workqueue_struct *xfslogd_workqueue;
42 struct workqueue_struct *xfsdatad_workqueue;
43
44 #ifdef XFS_BUF_TRACE
45 void
46 xfs_buf_trace(
47         xfs_buf_t       *bp,
48         char            *id,
49         void            *data,
50         void            *ra)
51 {
52         ktrace_enter(xfs_buf_trace_buf,
53                 bp, id,
54                 (void *)(unsigned long)bp->b_flags,
55                 (void *)(unsigned long)bp->b_hold.counter,
56                 (void *)(unsigned long)bp->b_sema.count.counter,
57                 (void *)current,
58                 data, ra,
59                 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
60                 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
61                 (void *)(unsigned long)bp->b_buffer_length,
62                 NULL, NULL, NULL, NULL, NULL);
63 }
64 ktrace_t *xfs_buf_trace_buf;
65 #define XFS_BUF_TRACE_SIZE      4096
66 #define XB_TRACE(bp, id, data)  \
67         xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
68 #else
69 #define XB_TRACE(bp, id, data)  do { } while (0)
70 #endif
71
72 #ifdef XFS_BUF_LOCK_TRACKING
73 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
74 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
75 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
76 #else
77 # define XB_SET_OWNER(bp)       do { } while (0)
78 # define XB_CLEAR_OWNER(bp)     do { } while (0)
79 # define XB_GET_OWNER(bp)       do { } while (0)
80 #endif
81
82 #define xb_to_gfp(flags) \
83         ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
84           ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
85
86 #define xb_to_km(flags) \
87          (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
88
89 #define xfs_buf_allocate(flags) \
90         kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
91 #define xfs_buf_deallocate(bp) \
92         kmem_zone_free(xfs_buf_zone, (bp));
93
94 /*
95  *      Page Region interfaces.
96  *
97  *      For pages in filesystems where the blocksize is smaller than the
98  *      pagesize, we use the page->private field (long) to hold a bitmap
99  *      of uptodate regions within the page.
100  *
101  *      Each such region is "bytes per page / bits per long" bytes long.
102  *
103  *      NBPPR == number-of-bytes-per-page-region
104  *      BTOPR == bytes-to-page-region (rounded up)
105  *      BTOPRT == bytes-to-page-region-truncated (rounded down)
106  */
107 #if (BITS_PER_LONG == 32)
108 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
109 #elif (BITS_PER_LONG == 64)
110 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
111 #else
112 #error BITS_PER_LONG must be 32 or 64
113 #endif
114 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
115 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
116 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
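
/*
 * Worked example (illustrative only, assuming 4096-byte pages and
 * 64-bit longs): PRSHIFT = 12 - 6 = 6, so NBPPR = 4096 / 64 = 64 bytes
 * per region and the page is tracked by the 64 bits of page->private.
 * A 512-byte filesystem block therefore spans 512 / 64 = 8 regions,
 * i.e. 8 bits of the uptodate bitmap.
 */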
117
118 STATIC unsigned long
119 page_region_mask(
120         size_t          offset,
121         size_t          length)
122 {
123         unsigned long   mask;
124         int             first, final;
125
126         first = BTOPR(offset);
127         final = BTOPRT(offset + length - 1);
128         first = min(first, final);
129
130         mask = ~0UL;
131         mask <<= BITS_PER_LONG - (final - first);
132         mask >>= BITS_PER_LONG - (final);
133
134         ASSERT(offset + length <= PAGE_CACHE_SIZE);
135         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
136
137         return mask;
138 }
139
140 STATIC inline void
141 set_page_region(
142         struct page     *page,
143         size_t          offset,
144         size_t          length)
145 {
146         set_page_private(page,
147                 page_private(page) | page_region_mask(offset, length));
148         if (page_private(page) == ~0UL)
149                 SetPageUptodate(page);
150 }
151
152 STATIC inline int
153 test_page_region(
154         struct page     *page,
155         size_t          offset,
156         size_t          length)
157 {
158         unsigned long   mask = page_region_mask(offset, length);
159
160         return (mask && (page_private(page) & mask) == mask);
161 }
162
163 /*
164  *      Mapping of multi-page buffers into contiguous virtual space
165  */
166
167 typedef struct a_list {
168         void            *vm_addr;
169         struct a_list   *next;
170 } a_list_t;
171
172 STATIC a_list_t         *as_free_head;
173 STATIC int              as_list_len;
174 STATIC DEFINE_SPINLOCK(as_lock);
175
176 /*
177  *      Try to batch vunmaps because they are costly.
178  */
179 STATIC void
180 free_address(
181         void            *addr)
182 {
183         a_list_t        *aentry;
184
185         aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
186         if (likely(aentry)) {
187                 spin_lock(&as_lock);
188                 aentry->next = as_free_head;
189                 aentry->vm_addr = addr;
190                 as_free_head = aentry;
191                 as_list_len++;
192                 spin_unlock(&as_lock);
193         } else {
194                 vunmap(addr);
195         }
196 }
197
198 STATIC void
199 purge_addresses(void)
200 {
201         a_list_t        *aentry, *old;
202
203         if (as_free_head == NULL)
204                 return;
205
206         spin_lock(&as_lock);
207         aentry = as_free_head;
208         as_free_head = NULL;
209         as_list_len = 0;
210         spin_unlock(&as_lock);
211
212         while ((old = aentry) != NULL) {
213                 vunmap(aentry->vm_addr);
214                 aentry = aentry->next;
215                 kfree(old);
216         }
217 }
218
219 /*
220  *      Internal xfs_buf_t object manipulation
221  */
222
223 STATIC void
224 _xfs_buf_initialize(
225         xfs_buf_t               *bp,
226         xfs_buftarg_t           *target,
227         xfs_off_t               range_base,
228         size_t                  range_length,
229         xfs_buf_flags_t         flags)
230 {
231         /*
232          * We don't want certain flags to appear in b_flags.
233          */
234         flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
235
236         memset(bp, 0, sizeof(xfs_buf_t));
237         atomic_set(&bp->b_hold, 1);
238         init_MUTEX_LOCKED(&bp->b_iodonesema);
239         INIT_LIST_HEAD(&bp->b_list);
240         INIT_LIST_HEAD(&bp->b_hash_list);
241         init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
242         XB_SET_OWNER(bp);
243         bp->b_target = target;
244         bp->b_file_offset = range_base;
245         /*
246          * Set buffer_length and count_desired to the same value initially.
247          * I/O routines should use count_desired, which will be the same in
248          * most cases but may be reset (e.g. XFS recovery).
249          */
250         bp->b_buffer_length = bp->b_count_desired = range_length;
251         bp->b_flags = flags;
252         bp->b_bn = XFS_BUF_DADDR_NULL;
253         atomic_set(&bp->b_pin_count, 0);
254         init_waitqueue_head(&bp->b_waiters);
255
256         XFS_STATS_INC(xb_create);
257         XB_TRACE(bp, "initialize", target);
258 }
259
260 /*
261  *      Allocate a page array capable of holding a specified number
262  *      of pages, and point the page buf at it.
263  */
264 STATIC int
265 _xfs_buf_get_pages(
266         xfs_buf_t               *bp,
267         int                     page_count,
268         xfs_buf_flags_t         flags)
269 {
270         /* Make sure that we have a page list */
271         if (bp->b_pages == NULL) {
272                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
273                 bp->b_page_count = page_count;
274                 if (page_count <= XB_PAGES) {
275                         bp->b_pages = bp->b_page_array;
276                 } else {
277                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
278                                         page_count, xb_to_km(flags));
279                         if (bp->b_pages == NULL)
280                                 return -ENOMEM;
281                 }
282                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
283         }
284         return 0;
285 }
286
287 /*
288  *      Frees b_pages if it was allocated.
289  */
290 STATIC void
291 _xfs_buf_free_pages(
292         xfs_buf_t       *bp)
293 {
294         if (bp->b_pages != bp->b_page_array) {
295                 kmem_free(bp->b_pages,
296                           bp->b_page_count * sizeof(struct page *));
297         }
298 }
299
300 /*
301  *      Releases the specified buffer.
302  *
303  *      The modification state of any associated pages is left unchanged.
304  *      The buffer must not be on any hash - use xfs_buf_rele instead for
305  *      hashed and refcounted buffers
306  */
307 void
308 xfs_buf_free(
309         xfs_buf_t               *bp)
310 {
311         XB_TRACE(bp, "free", 0);
312
313         ASSERT(list_empty(&bp->b_hash_list));
314
315         if (bp->b_flags & _XBF_PAGE_CACHE) {
316                 uint            i;
317
318                 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
319                         free_address(bp->b_addr - bp->b_offset);
320
321                 for (i = 0; i < bp->b_page_count; i++) {
322                         struct page     *page = bp->b_pages[i];
323
324                         ASSERT(!PagePrivate(page));
325                         page_cache_release(page);
326                 }
327                 _xfs_buf_free_pages(bp);
328         } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
329                  /*
330                   * XXX(hch): bp->b_count_desired might be incorrect (see
331                   * xfs_buf_associate_memory for details), but fortunately
332                   * the Linux version of kmem_free ignores the len argument..
333                   */
334                 kmem_free(bp->b_addr, bp->b_count_desired);
335                 _xfs_buf_free_pages(bp);
336         }
337
338         xfs_buf_deallocate(bp);
339 }
340
341 /*
342  *      Finds all pages for the buffer in question and builds its page list.
343  */
344 STATIC int
345 _xfs_buf_lookup_pages(
346         xfs_buf_t               *bp,
347         uint                    flags)
348 {
349         struct address_space    *mapping = bp->b_target->bt_mapping;
350         size_t                  blocksize = bp->b_target->bt_bsize;
351         size_t                  size = bp->b_count_desired;
352         size_t                  nbytes, offset;
353         gfp_t                   gfp_mask = xb_to_gfp(flags);
354         unsigned short          page_count, i;
355         pgoff_t                 first;
356         xfs_off_t               end;
357         int                     error;
358
359         end = bp->b_file_offset + bp->b_buffer_length;
360         page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
361
362         error = _xfs_buf_get_pages(bp, page_count, flags);
363         if (unlikely(error))
364                 return error;
365         bp->b_flags |= _XBF_PAGE_CACHE;
366
367         offset = bp->b_offset;
368         first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
369
370         for (i = 0; i < bp->b_page_count; i++) {
371                 struct page     *page;
372                 uint            retries = 0;
373
374               retry:
375                 page = find_or_create_page(mapping, first + i, gfp_mask);
376                 if (unlikely(page == NULL)) {
377                         if (flags & XBF_READ_AHEAD) {
378                                 bp->b_page_count = i;
379                                 for (i = 0; i < bp->b_page_count; i++)
380                                         unlock_page(bp->b_pages[i]);
381                                 return -ENOMEM;
382                         }
383
384                         /*
385                          * This could deadlock.
386                          *
387                          * But until all the XFS lowlevel code is revamped to
388                          * handle buffer allocation failures we can't do much.
389                          */
390                         if (!(++retries % 100))
391                                 printk(KERN_ERR
392                                         "XFS: possible memory allocation "
393                                         "deadlock in %s (mode:0x%x)\n",
394                                         __FUNCTION__, gfp_mask);
395
396                         XFS_STATS_INC(xb_page_retries);
397                         xfsbufd_wakeup(0, gfp_mask);
398                         blk_congestion_wait(WRITE, HZ/50);
399                         goto retry;
400                 }
401
402                 XFS_STATS_INC(xb_page_found);
403
404                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
405                 size -= nbytes;
406
407                 ASSERT(!PagePrivate(page));
408                 if (!PageUptodate(page)) {
409                         page_count--;
410                         if (blocksize >= PAGE_CACHE_SIZE) {
411                                 if (flags & XBF_READ)
412                                         bp->b_locked = 1;
413                         } else if (!PagePrivate(page)) {
414                                 if (test_page_region(page, offset, nbytes))
415                                         page_count++;
416                         }
417                 }
418
419                 bp->b_pages[i] = page;
420                 offset = 0;
421         }
422
423         if (!bp->b_locked) {
424                 for (i = 0; i < bp->b_page_count; i++)
425                         unlock_page(bp->b_pages[i]);
426         }
427
428         if (page_count == bp->b_page_count)
429                 bp->b_flags |= XBF_DONE;
430
431         XB_TRACE(bp, "lookup_pages", (long)page_count);
432         return error;
433 }
434
435 /*
436  *      Map buffer into kernel address-space if necessary.
437  */
438 STATIC int
439 _xfs_buf_map_pages(
440         xfs_buf_t               *bp,
441         uint                    flags)
442 {
443         /* A single page buffer is always mappable */
444         if (bp->b_page_count == 1) {
445                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
446                 bp->b_flags |= XBF_MAPPED;
447         } else if (flags & XBF_MAPPED) {
448                 if (as_list_len > 64)
449                         purge_addresses();
450                 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
451                                         VM_MAP, PAGE_KERNEL);
452                 if (unlikely(bp->b_addr == NULL))
453                         return -ENOMEM;
454                 bp->b_addr += bp->b_offset;
455                 bp->b_flags |= XBF_MAPPED;
456         }
457
458         return 0;
459 }
460
461 /*
462  *      Finding and Reading Buffers
463  */
464
465 /*
466  *      Looks up, and creates if absent, a lockable buffer for
467  *      a given range of an inode.  The buffer is returned
468  *      locked.  If other overlapping buffers exist, they are
469  *      released before the new buffer is created and locked,
470  *      which may imply that this call will block until those buffers
471  *      are unlocked.  No I/O is implied by this call.
472  */
473 xfs_buf_t *
474 _xfs_buf_find(
475         xfs_buftarg_t           *btp,   /* block device target          */
476         xfs_off_t               ioff,   /* starting offset of range     */
477         size_t                  isize,  /* length of range              */
478         xfs_buf_flags_t         flags,
479         xfs_buf_t               *new_bp)
480 {
481         xfs_off_t               range_base;
482         size_t                  range_length;
483         xfs_bufhash_t           *hash;
484         xfs_buf_t               *bp, *n;
485
486         range_base = (ioff << BBSHIFT);
487         range_length = (isize << BBSHIFT);
488
489         /* Check for IOs smaller than the sector size / not sector aligned */
490         ASSERT(!(range_length < (1 << btp->bt_sshift)));
491         ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
492
493         hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
494
495         spin_lock(&hash->bh_lock);
496
497         list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
498                 ASSERT(btp == bp->b_target);
499                 if (bp->b_file_offset == range_base &&
500                     bp->b_buffer_length == range_length) {
501                         /*
502                          * If we look at something, bring it to the
503                          * front of the list for next time.
504                          */
505                         atomic_inc(&bp->b_hold);
506                         list_move(&bp->b_hash_list, &hash->bh_list);
507                         goto found;
508                 }
509         }
510
511         /* No match found */
512         if (new_bp) {
513                 _xfs_buf_initialize(new_bp, btp, range_base,
514                                 range_length, flags);
515                 new_bp->b_hash = hash;
516                 list_add(&new_bp->b_hash_list, &hash->bh_list);
517         } else {
518                 XFS_STATS_INC(xb_miss_locked);
519         }
520
521         spin_unlock(&hash->bh_lock);
522         return new_bp;
523
524 found:
525         spin_unlock(&hash->bh_lock);
526
527         /* Attempt to get the semaphore without sleeping.
528          * If this does not work then we need to drop the
529          * spinlock and do a hard attempt on the semaphore.
530          */
531         if (down_trylock(&bp->b_sema)) {
532                 if (!(flags & XBF_TRYLOCK)) {
533                         /* wait for buffer ownership */
534                         XB_TRACE(bp, "get_lock", 0);
535                         xfs_buf_lock(bp);
536                         XFS_STATS_INC(xb_get_locked_waited);
537                 } else {
538                         /* We asked for a trylock and failed, no need
539                          * to look at file offset and length here, we
540                          * know that this buffer at least overlaps our
541                          * buffer and is locked, therefore our buffer
542                          * either does not exist, or is this buffer.
543                          */
544                         xfs_buf_rele(bp);
545                         XFS_STATS_INC(xb_busy_locked);
546                         return NULL;
547                 }
548         } else {
549                 /* trylock worked */
550                 XB_SET_OWNER(bp);
551         }
552
553         if (bp->b_flags & XBF_STALE) {
554                 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
555                 bp->b_flags &= XBF_MAPPED;
556         }
557         XB_TRACE(bp, "got_lock", 0);
558         XFS_STATS_INC(xb_get_locked);
559         return bp;
560 }
561
562 /*
563  *      Assembles a buffer covering the specified range.
564  *      Storage in memory for all portions of the buffer will be allocated,
565  *      although backing storage may not be.
566  */
567 xfs_buf_t *
568 xfs_buf_get_flags(
569         xfs_buftarg_t           *target,/* target for buffer            */
570         xfs_off_t               ioff,   /* starting offset of range     */
571         size_t                  isize,  /* length of range              */
572         xfs_buf_flags_t         flags)
573 {
574         xfs_buf_t               *bp, *new_bp;
575         int                     error = 0, i;
576
577         new_bp = xfs_buf_allocate(flags);
578         if (unlikely(!new_bp))
579                 return NULL;
580
581         bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
582         if (bp == new_bp) {
583                 error = _xfs_buf_lookup_pages(bp, flags);
584                 if (error)
585                         goto no_buffer;
586         } else {
587                 xfs_buf_deallocate(new_bp);
588                 if (unlikely(bp == NULL))
589                         return NULL;
590         }
591
592         for (i = 0; i < bp->b_page_count; i++)
593                 mark_page_accessed(bp->b_pages[i]);
594
595         if (!(bp->b_flags & XBF_MAPPED)) {
596                 error = _xfs_buf_map_pages(bp, flags);
597                 if (unlikely(error)) {
598                         printk(KERN_WARNING "%s: failed to map pages\n",
599                                         __FUNCTION__);
600                         goto no_buffer;
601                 }
602         }
603
604         XFS_STATS_INC(xb_get);
605
606         /*
607          * Always fill in the block number now; the mapped cases can do
608          * their own overlay of this later.
609          */
610         bp->b_bn = ioff;
611         bp->b_count_desired = bp->b_buffer_length;
612
613         XB_TRACE(bp, "get", (unsigned long)flags);
614         return bp;
615
616  no_buffer:
617         if (flags & (XBF_LOCK | XBF_TRYLOCK))
618                 xfs_buf_unlock(bp);
619         xfs_buf_rele(bp);
620         return NULL;
621 }
622
623 xfs_buf_t *
624 xfs_buf_read_flags(
625         xfs_buftarg_t           *target,
626         xfs_off_t               ioff,
627         size_t                  isize,
628         xfs_buf_flags_t         flags)
629 {
630         xfs_buf_t               *bp;
631
632         flags |= XBF_READ;
633
634         bp = xfs_buf_get_flags(target, ioff, isize, flags);
635         if (bp) {
636                 if (!XFS_BUF_ISDONE(bp)) {
637                         XB_TRACE(bp, "read", (unsigned long)flags);
638                         XFS_STATS_INC(xb_get_read);
639                         xfs_buf_iostart(bp, flags);
640                 } else if (flags & XBF_ASYNC) {
641                         XB_TRACE(bp, "read_async", (unsigned long)flags);
642                         /*
643                          * Read ahead call which is already satisfied,
644                          * drop the buffer
645                          */
646                         goto no_buffer;
647                 } else {
648                         XB_TRACE(bp, "read_done", (unsigned long)flags);
649                         /* We do not want read in the flags */
650                         bp->b_flags &= ~XBF_READ;
651                 }
652         }
653
654         return bp;
655
656  no_buffer:
657         if (flags & (XBF_LOCK | XBF_TRYLOCK))
658                 xfs_buf_unlock(bp);
659         xfs_buf_rele(bp);
660         return NULL;
661 }
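
/*
 * Illustrative sketch only (hypothetical helper, not called from this
 * file): a typical synchronous metadata read through the interfaces
 * above -- get the buffer locked and mapped, let xfs_buf_read_flags()
 * issue the read and wait, then check b_error and release the buffer.
 */
STATIC int
xfs_buf_read_example(
        xfs_buftarg_t           *target,
        xfs_off_t               ioff,
        size_t                  isize)
{
        xfs_buf_t               *bp;
        int                     error;

        bp = xfs_buf_read_flags(target, ioff, isize, XBF_LOCK | XBF_MAPPED);
        if (unlikely(!bp))
                return ENOMEM;

        error = bp->b_error;
        /* bp->b_addr points at the mapped contents when error == 0 */
        xfs_buf_relse(bp);
        return error;
}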
662
663 /*
664  *      If we are not low on memory then do the readahead in a deadlock
665  *      safe manner.
666  */
667 void
668 xfs_buf_readahead(
669         xfs_buftarg_t           *target,
670         xfs_off_t               ioff,
671         size_t                  isize,
672         xfs_buf_flags_t         flags)
673 {
674         struct backing_dev_info *bdi;
675
676         bdi = target->bt_mapping->backing_dev_info;
677         if (bdi_read_congested(bdi))
678                 return;
679
680         flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
681         xfs_buf_read_flags(target, ioff, isize, flags);
682 }
683
684 xfs_buf_t *
685 xfs_buf_get_empty(
686         size_t                  len,
687         xfs_buftarg_t           *target)
688 {
689         xfs_buf_t               *bp;
690
691         bp = xfs_buf_allocate(0);
692         if (bp)
693                 _xfs_buf_initialize(bp, target, 0, len, 0);
694         return bp;
695 }
696
697 static inline struct page *
698 mem_to_page(
699         void                    *addr)
700 {
701         if (((unsigned long)addr < VMALLOC_START) ||
702             ((unsigned long)addr >= VMALLOC_END)) {
703                 return virt_to_page(addr);
704         } else {
705                 return vmalloc_to_page(addr);
706         }
707 }
708
709 int
710 xfs_buf_associate_memory(
711         xfs_buf_t               *bp,
712         void                    *mem,
713         size_t                  len)
714 {
715         int                     rval;
716         int                     i = 0;
717         size_t                  ptr;
718         size_t                  end, end_cur;
719         off_t                   offset;
720         int                     page_count;
721
722         page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
723         offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
724         if (offset && (len > PAGE_CACHE_SIZE))
725                 page_count++;
726
727         /* Free any previous set of page pointers */
728         if (bp->b_pages)
729                 _xfs_buf_free_pages(bp);
730
731         bp->b_pages = NULL;
732         bp->b_addr = mem;
733
734         rval = _xfs_buf_get_pages(bp, page_count, 0);
735         if (rval)
736                 return rval;
737
738         bp->b_offset = offset;
739         ptr = (size_t) mem & PAGE_CACHE_MASK;
740         end = PAGE_CACHE_ALIGN((size_t) mem + len);
741         end_cur = end;
742         /* set up first page */
743         bp->b_pages[0] = mem_to_page(mem);
744
745         ptr += PAGE_CACHE_SIZE;
746         bp->b_page_count = ++i;
747         while (ptr < end) {
748                 bp->b_pages[i] = mem_to_page((void *)ptr);
749                 bp->b_page_count = ++i;
750                 ptr += PAGE_CACHE_SIZE;
751         }
752         bp->b_locked = 0;
753
754         bp->b_count_desired = bp->b_buffer_length = len;
755         bp->b_flags |= XBF_MAPPED;
756
757         return 0;
758 }
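
/*
 * Illustrative sketch only (hypothetical helper, not called from this
 * file): wrap a caller-supplied memory region in an uncached buffer by
 * combining xfs_buf_get_empty() and xfs_buf_associate_memory().
 */
STATIC xfs_buf_t *
xfs_buf_wrap_memory_example(
        xfs_buftarg_t           *target,
        void                    *mem,
        size_t                  len)
{
        xfs_buf_t               *bp;

        bp = xfs_buf_get_empty(len, target);
        if (unlikely(!bp))
                return NULL;
        if (xfs_buf_associate_memory(bp, mem, len)) {
                xfs_buf_free(bp);
                return NULL;
        }
        /* still locked here; compare xfs_buf_get_noaddr(), which unlocks */
        return bp;
}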
759
760 xfs_buf_t *
761 xfs_buf_get_noaddr(
762         size_t                  len,
763         xfs_buftarg_t           *target)
764 {
765         size_t                  malloc_len = len;
766         xfs_buf_t               *bp;
767         void                    *data;
768         int                     error;
769
770         bp = xfs_buf_allocate(0);
771         if (unlikely(bp == NULL))
772                 goto fail;
773         _xfs_buf_initialize(bp, target, 0, len, 0);
774
775  try_again:
776         data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
777         if (unlikely(data == NULL))
778                 goto fail_free_buf;
779
780         /* check whether alignment matches.. */
781         if ((__psunsigned_t)data !=
782             ((__psunsigned_t)data & ~target->bt_smask)) {
783                 /* .. else double the size and try again */
784                 kmem_free(data, malloc_len);
785                 malloc_len <<= 1;
786                 goto try_again;
787         }
788
789         error = xfs_buf_associate_memory(bp, data, len);
790         if (error)
791                 goto fail_free_mem;
792         bp->b_flags |= _XBF_KMEM_ALLOC;
793
794         xfs_buf_unlock(bp);
795
796         XB_TRACE(bp, "no_daddr", data);
797         return bp;
798  fail_free_mem:
799         kmem_free(data, malloc_len);
800  fail_free_buf:
801         xfs_buf_free(bp);
802  fail:
803         return NULL;
804 }
805
806 /*
807  *      Increment reference count on buffer, to hold the buffer concurrently
808  *      with another thread which may release (free) the buffer asynchronously.
809  *      Must hold the buffer already to call this function.
810  */
811 void
812 xfs_buf_hold(
813         xfs_buf_t               *bp)
814 {
815         atomic_inc(&bp->b_hold);
816         XB_TRACE(bp, "hold", 0);
817 }
818
819 /*
820  *      Releases a hold on the specified buffer.  If the
822  *      hold count is 1, calls xfs_buf_free.
822  */
823 void
824 xfs_buf_rele(
825         xfs_buf_t               *bp)
826 {
827         xfs_bufhash_t           *hash = bp->b_hash;
828
829         XB_TRACE(bp, "rele", bp->b_relse);
830
831         if (unlikely(!hash)) {
832                 ASSERT(!bp->b_relse);
833                 if (atomic_dec_and_test(&bp->b_hold))
834                         xfs_buf_free(bp);
835                 return;
836         }
837
838         if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
839                 if (bp->b_relse) {
840                         atomic_inc(&bp->b_hold);
841                         spin_unlock(&hash->bh_lock);
842                         (*(bp->b_relse)) (bp);
843                 } else if (bp->b_flags & XBF_FS_MANAGED) {
844                         spin_unlock(&hash->bh_lock);
845                 } else {
846                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
847                         list_del_init(&bp->b_hash_list);
848                         spin_unlock(&hash->bh_lock);
849                         xfs_buf_free(bp);
850                 }
851         } else {
852                 /*
853                  * Catch reference count leaks
854                  */
855                 ASSERT(atomic_read(&bp->b_hold) >= 0);
856         }
857 }
858
859
860 /*
861  *      Mutual exclusion on buffers.  Locking model:
862  *
863  *      Buffers associated with inodes for which buffer locking
864  *      is not enabled are not protected by semaphores, and are
865  *      assumed to be exclusively owned by the caller.  There is a
866  *      spinlock in the buffer, used by the caller when concurrent
867  *      access is possible.
868  */
869
870 /*
871  *      Locks a buffer object, if it is not already locked.
872  *      Note that this in no way locks the underlying pages, so it is only
873  *      useful for synchronizing concurrent use of buffer objects, not for
874  *      synchronizing independent access to the underlying pages.
875  */
876 int
877 xfs_buf_cond_lock(
878         xfs_buf_t               *bp)
879 {
880         int                     locked;
881
882         locked = down_trylock(&bp->b_sema) == 0;
883         if (locked) {
884                 XB_SET_OWNER(bp);
885         }
886         XB_TRACE(bp, "cond_lock", (long)locked);
887         return locked ? 0 : -EBUSY;
888 }
889
890 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
891 int
892 xfs_buf_lock_value(
893         xfs_buf_t               *bp)
894 {
895         return atomic_read(&bp->b_sema.count);
896 }
897 #endif
898
899 /*
900  *      Locks a buffer object.
901  *      Note that this in no way locks the underlying pages, so it is only
902  *      useful for synchronizing concurrent use of buffer objects, not for
903  *      synchronizing independent access to the underlying pages.
904  */
905 void
906 xfs_buf_lock(
907         xfs_buf_t               *bp)
908 {
909         XB_TRACE(bp, "lock", 0);
910         if (atomic_read(&bp->b_io_remaining))
911                 blk_run_address_space(bp->b_target->bt_mapping);
912         down(&bp->b_sema);
913         XB_SET_OWNER(bp);
914         XB_TRACE(bp, "locked", 0);
915 }
916
917 /*
918  *      Releases the lock on the buffer object.
919  *      If the buffer is marked delwri but is not queued, do so before we
920  *      unlock the buffer as we need to set flags correctly.  We also need to
921  *      take a reference for the delwri queue because the unlocker is going to
922  *      drop theirs and they don't know we just queued it.
923  */
924 void
925 xfs_buf_unlock(
926         xfs_buf_t               *bp)
927 {
928         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
929                 atomic_inc(&bp->b_hold);
930                 bp->b_flags |= XBF_ASYNC;
931                 xfs_buf_delwri_queue(bp, 0);
932         }
933
934         XB_CLEAR_OWNER(bp);
935         up(&bp->b_sema);
936         XB_TRACE(bp, "unlock", 0);
937 }
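
/*
 * Illustrative sketch only (hypothetical caller): take the buffer lock
 * opportunistically with xfs_buf_cond_lock(), falling back to the
 * blocking xfs_buf_lock() when the buffer is contended.
 */
STATIC void
xfs_buf_lock_example(
        xfs_buf_t               *bp)
{
        if (xfs_buf_cond_lock(bp))      /* returns -EBUSY if already locked */
                xfs_buf_lock(bp);

        /* ... caller works on the locked buffer here ... */

        xfs_buf_unlock(bp);
}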
938
939
940 /*
941  *      Pinning Buffer Storage in Memory
942  *      Ensure that no attempt to force a buffer to disk will succeed.
943  */
944 void
945 xfs_buf_pin(
946         xfs_buf_t               *bp)
947 {
948         atomic_inc(&bp->b_pin_count);
949         XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
950 }
951
952 void
953 xfs_buf_unpin(
954         xfs_buf_t               *bp)
955 {
956         if (atomic_dec_and_test(&bp->b_pin_count))
957                 wake_up_all(&bp->b_waiters);
958         XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
959 }
960
961 int
962 xfs_buf_ispin(
963         xfs_buf_t               *bp)
964 {
965         return atomic_read(&bp->b_pin_count);
966 }
967
968 STATIC void
969 xfs_buf_wait_unpin(
970         xfs_buf_t               *bp)
971 {
972         DECLARE_WAITQUEUE       (wait, current);
973
974         if (atomic_read(&bp->b_pin_count) == 0)
975                 return;
976
977         add_wait_queue(&bp->b_waiters, &wait);
978         for (;;) {
979                 set_current_state(TASK_UNINTERRUPTIBLE);
980                 if (atomic_read(&bp->b_pin_count) == 0)
981                         break;
982                 if (atomic_read(&bp->b_io_remaining))
983                         blk_run_address_space(bp->b_target->bt_mapping);
984                 schedule();
985         }
986         remove_wait_queue(&bp->b_waiters, &wait);
987         set_current_state(TASK_RUNNING);
988 }
989
990 /*
991  *      Buffer Utility Routines
992  */
993
994 STATIC void
995 xfs_buf_iodone_work(
996         void                    *v)
997 {
998         xfs_buf_t               *bp = (xfs_buf_t *)v;
999
1000         if (bp->b_iodone)
1001                 (*(bp->b_iodone))(bp);
1002         else if (bp->b_flags & XBF_ASYNC)
1003                 xfs_buf_relse(bp);
1004 }
1005
1006 void
1007 xfs_buf_ioend(
1008         xfs_buf_t               *bp,
1009         int                     schedule)
1010 {
1011         bp->b_flags &= ~(XBF_READ | XBF_WRITE);
1012         if (bp->b_error == 0)
1013                 bp->b_flags |= XBF_DONE;
1014
1015         XB_TRACE(bp, "iodone", bp->b_iodone);
1016
1017         if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1018                 if (schedule) {
1019                         INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
1020                         queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1021                 } else {
1022                         xfs_buf_iodone_work(bp);
1023                 }
1024         } else {
1025                 up(&bp->b_iodonesema);
1026         }
1027 }
1028
1029 void
1030 xfs_buf_ioerror(
1031         xfs_buf_t               *bp,
1032         int                     error)
1033 {
1034         ASSERT(error >= 0 && error <= 0xffff);
1035         bp->b_error = (unsigned short)error;
1036         XB_TRACE(bp, "ioerror", (unsigned long)error);
1037 }
1038
1039 /*
1040  *      Initiate I/O on a buffer, based on the flags supplied.
1041  *      The b_iodone routine in the buffer supplied will only be called
1042  *      when all of the subsidiary I/O requests, if any, have been completed.
1043  */
1044 int
1045 xfs_buf_iostart(
1046         xfs_buf_t               *bp,
1047         xfs_buf_flags_t         flags)
1048 {
1049         int                     status = 0;
1050
1051         XB_TRACE(bp, "iostart", (unsigned long)flags);
1052
1053         if (flags & XBF_DELWRI) {
1054                 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
1055                 bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
1056                 xfs_buf_delwri_queue(bp, 1);
1057                 return status;
1058         }
1059
1060         bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
1061                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1062         bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
1063                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1064
1065         BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
1066
1067         /* For writes allow an alternate strategy routine to precede
1068          * the actual I/O request (which may not be issued at all in
1069          * a shutdown situation, for example).
1070          */
1071         status = (flags & XBF_WRITE) ?
1072                 xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
1073
1074         /* Wait for I/O if we are not an async request.
1075          * Note: async I/O request completion will release the buffer,
1076          * and that can already be done by this point.  So using the
1077          * buffer pointer from here on, after async I/O, is invalid.
1078          */
1079         if (!status && !(flags & XBF_ASYNC))
1080                 status = xfs_buf_iowait(bp);
1081
1082         return status;
1083 }
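
/*
 * Illustrative sketch only (hypothetical caller, assuming the usual
 * xfs_buf_iodone_t callback signature): start an asynchronous read.
 * Completion arrives via xfs_buf_ioend(), which invokes b_iodone from
 * the xfslogd workqueue; the callback is then responsible for releasing
 * the buffer.
 */
STATIC void
xfs_buf_example_iodone(
        xfs_buf_t               *bp)
{
        /* inspect bp->b_error here, then drop the lock and reference */
        xfs_buf_relse(bp);
}

STATIC int
xfs_buf_async_read_example(
        xfs_buf_t               *bp)
{
        bp->b_iodone = xfs_buf_example_iodone;
        return xfs_buf_iostart(bp, XBF_READ | XBF_ASYNC);
}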
1084
1085 STATIC __inline__ int
1086 _xfs_buf_iolocked(
1087         xfs_buf_t               *bp)
1088 {
1089         ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
1090         if (bp->b_flags & XBF_READ)
1091                 return bp->b_locked;
1092         return 0;
1093 }
1094
1095 STATIC __inline__ void
1096 _xfs_buf_ioend(
1097         xfs_buf_t               *bp,
1098         int                     schedule)
1099 {
1100         if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1101                 bp->b_locked = 0;
1102                 xfs_buf_ioend(bp, schedule);
1103         }
1104 }
1105
1106 STATIC int
1107 xfs_buf_bio_end_io(
1108         struct bio              *bio,
1109         unsigned int            bytes_done,
1110         int                     error)
1111 {
1112         xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1113         unsigned int            blocksize = bp->b_target->bt_bsize;
1114         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1115
1116         if (bio->bi_size)
1117                 return 1;
1118
1119         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1120                 bp->b_error = EIO;
1121
1122         do {
1123                 struct page     *page = bvec->bv_page;
1124
1125                 ASSERT(!PagePrivate(page));
1126                 if (unlikely(bp->b_error)) {
1127                         if (bp->b_flags & XBF_READ)
1128                                 ClearPageUptodate(page);
1129                 } else if (blocksize >= PAGE_CACHE_SIZE) {
1130                         SetPageUptodate(page);
1131                 } else if (!PagePrivate(page) &&
1132                                 (bp->b_flags & _XBF_PAGE_CACHE)) {
1133                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1134                 }
1135
1136                 if (--bvec >= bio->bi_io_vec)
1137                         prefetchw(&bvec->bv_page->flags);
1138
1139                 if (_xfs_buf_iolocked(bp)) {
1140                         unlock_page(page);
1141                 }
1142         } while (bvec >= bio->bi_io_vec);
1143
1144         _xfs_buf_ioend(bp, 1);
1145         bio_put(bio);
1146         return 0;
1147 }
1148
1149 STATIC void
1150 _xfs_buf_ioapply(
1151         xfs_buf_t               *bp)
1152 {
1153         int                     i, rw, map_i, total_nr_pages, nr_pages;
1154         struct bio              *bio;
1155         int                     offset = bp->b_offset;
1156         int                     size = bp->b_count_desired;
1157         sector_t                sector = bp->b_bn;
1158         unsigned int            blocksize = bp->b_target->bt_bsize;
1159         int                     locking = _xfs_buf_iolocked(bp);
1160
1161         total_nr_pages = bp->b_page_count;
1162         map_i = 0;
1163
1164         if (bp->b_flags & XBF_ORDERED) {
1165                 ASSERT(!(bp->b_flags & XBF_READ));
1166                 rw = WRITE_BARRIER;
1167         } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1168                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1169                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1170                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1171         } else {
1172                 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1173                      (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1174         }
1175
1176         /* Special code path for reading a sub-page-size buffer --
1177          * we populate the whole page, and hence the other metadata
1178          * in the same page.  This optimization is only valid when the
1179          * filesystem block size is not smaller than the page size.
1180          */
1181         if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1182             (bp->b_flags & XBF_READ) && locking &&
1183             (blocksize >= PAGE_CACHE_SIZE)) {
1184                 bio = bio_alloc(GFP_NOIO, 1);
1185
1186                 bio->bi_bdev = bp->b_target->bt_bdev;
1187                 bio->bi_sector = sector - (offset >> BBSHIFT);
1188                 bio->bi_end_io = xfs_buf_bio_end_io;
1189                 bio->bi_private = bp;
1190
1191                 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1192                 size = 0;
1193
1194                 atomic_inc(&bp->b_io_remaining);
1195
1196                 goto submit_io;
1197         }
1198
1199         /* Lock down the pages which we need to for the request */
1200         if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
1201                 for (i = 0; size; i++) {
1202                         int             nbytes = PAGE_CACHE_SIZE - offset;
1203                         struct page     *page = bp->b_pages[i];
1204
1205                         if (nbytes > size)
1206                                 nbytes = size;
1207
1208                         lock_page(page);
1209
1210                         size -= nbytes;
1211                         offset = 0;
1212                 }
1213                 offset = bp->b_offset;
1214                 size = bp->b_count_desired;
1215         }
1216
1217 next_chunk:
1218         atomic_inc(&bp->b_io_remaining);
1219         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1220         if (nr_pages > total_nr_pages)
1221                 nr_pages = total_nr_pages;
1222
1223         bio = bio_alloc(GFP_NOIO, nr_pages);
1224         bio->bi_bdev = bp->b_target->bt_bdev;
1225         bio->bi_sector = sector;
1226         bio->bi_end_io = xfs_buf_bio_end_io;
1227         bio->bi_private = bp;
1228
1229         for (; size && nr_pages; nr_pages--, map_i++) {
1230                 int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1231
1232                 if (nbytes > size)
1233                         nbytes = size;
1234
1235                 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1236                 if (rbytes < nbytes)
1237                         break;
1238
1239                 offset = 0;
1240                 sector += nbytes >> BBSHIFT;
1241                 size -= nbytes;
1242                 total_nr_pages--;
1243         }
1244
1245 submit_io:
1246         if (likely(bio->bi_size)) {
1247                 submit_bio(rw, bio);
1248                 if (size)
1249                         goto next_chunk;
1250         } else {
1251                 bio_put(bio);
1252                 xfs_buf_ioerror(bp, EIO);
1253         }
1254 }
1255
1256 int
1257 xfs_buf_iorequest(
1258         xfs_buf_t               *bp)
1259 {
1260         XB_TRACE(bp, "iorequest", 0);
1261
1262         if (bp->b_flags & XBF_DELWRI) {
1263                 xfs_buf_delwri_queue(bp, 1);
1264                 return 0;
1265         }
1266
1267         if (bp->b_flags & XBF_WRITE) {
1268                 xfs_buf_wait_unpin(bp);
1269         }
1270
1271         xfs_buf_hold(bp);
1272
1273         /* Set the count to 1 initially; this will stop an I/O
1274          * completion callout which happens before we have started
1275          * all the I/O from calling xfs_buf_ioend too early.
1276          */
1277         atomic_set(&bp->b_io_remaining, 1);
1278         _xfs_buf_ioapply(bp);
1279         _xfs_buf_ioend(bp, 0);
1280
1281         xfs_buf_rele(bp);
1282         return 0;
1283 }
1284
1285 /*
1286  *      Waits for I/O to complete on the buffer supplied.
1287  *      It returns immediately if no I/O is pending.
1288  *      It returns the I/O error code, if any, or 0 if there was no error.
1289  */
1290 int
1291 xfs_buf_iowait(
1292         xfs_buf_t               *bp)
1293 {
1294         XB_TRACE(bp, "iowait", 0);
1295         if (atomic_read(&bp->b_io_remaining))
1296                 blk_run_address_space(bp->b_target->bt_mapping);
1297         down(&bp->b_iodonesema);
1298         XB_TRACE(bp, "iowaited", (long)bp->b_error);
1299         return bp->b_error;
1300 }
1301
1302 xfs_caddr_t
1303 xfs_buf_offset(
1304         xfs_buf_t               *bp,
1305         size_t                  offset)
1306 {
1307         struct page             *page;
1308
1309         if (bp->b_flags & XBF_MAPPED)
1310                 return XFS_BUF_PTR(bp) + offset;
1311
1312         offset += bp->b_offset;
1313         page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1314         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1315 }
1316
1317 /*
1318  *      Move data into or out of a buffer.
1319  */
1320 void
1321 xfs_buf_iomove(
1322         xfs_buf_t               *bp,    /* buffer to process            */
1323         size_t                  boff,   /* starting buffer offset       */
1324         size_t                  bsize,  /* length to copy               */
1325         caddr_t                 data,   /* data address                 */
1326         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1327 {
1328         size_t                  bend, cpoff, csize;
1329         struct page             *page;
1330
1331         bend = boff + bsize;
1332         while (boff < bend) {
1333                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1334                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1335                 csize = min_t(size_t,
1336                               PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1337
1338                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1339
1340                 switch (mode) {
1341                 case XBRW_ZERO:
1342                         memset(page_address(page) + cpoff, 0, csize);
1343                         break;
1344                 case XBRW_READ:
1345                         memcpy(data, page_address(page) + cpoff, csize);
1346                         break;
1347                 case XBRW_WRITE:
1348                         memcpy(page_address(page) + cpoff, data, csize);
1349                 }
1350
1351                 boff += csize;
1352                 data += csize;
1353         }
1354 }
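
/*
 * Illustrative sketch only (hypothetical helper, not called from this
 * file): zero a buffer's contents through the page-aware copy helper
 * above and then write it out synchronously.
 */
STATIC int
xfs_buf_zero_and_write_example(
        xfs_buf_t               *bp)
{
        /* XBRW_ZERO ignores the data pointer, so NULL is fine here */
        xfs_buf_iomove(bp, 0, bp->b_count_desired, NULL, XBRW_ZERO);

        /* no XBF_ASYNC, so xfs_buf_iostart() waits via xfs_buf_iowait() */
        return xfs_buf_iostart(bp, XBF_WRITE);
}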
1355
1356 /*
1357  *      Handling of buffer targets (buftargs).
1358  */
1359
1360 /*
1361  *      Wait for any bufs with callbacks that have been submitted but
1362  *      have not yet returned... walk the hash list for the target.
1363  */
1364 void
1365 xfs_wait_buftarg(
1366         xfs_buftarg_t   *btp)
1367 {
1368         xfs_buf_t       *bp, *n;
1369         xfs_bufhash_t   *hash;
1370         uint            i;
1371
1372         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1373                 hash = &btp->bt_hash[i];
1374 again:
1375                 spin_lock(&hash->bh_lock);
1376                 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1377                         ASSERT(btp == bp->b_target);
1378                         if (!(bp->b_flags & XBF_FS_MANAGED)) {
1379                                 spin_unlock(&hash->bh_lock);
1380                                 /*
1381                                  * Catch superblock reference count leaks
1382                                  * immediately
1383                                  */
1384                                 BUG_ON(bp->b_bn == 0);
1385                                 delay(100);
1386                                 goto again;
1387                         }
1388                 }
1389                 spin_unlock(&hash->bh_lock);
1390         }
1391 }
1392
1393 /*
1394  *      Allocate buffer hash table for a given target.
1395  *      For devices containing metadata (i.e. not the log/realtime devices)
1396  *      we need to allocate a much larger hash table.
1397  */
1398 STATIC void
1399 xfs_alloc_bufhash(
1400         xfs_buftarg_t           *btp,
1401         int                     external)
1402 {
1403         unsigned int            i;
1404
1405         btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
1406         btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1407         btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1408                                         sizeof(xfs_bufhash_t), KM_SLEEP);
1409         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1410                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1411                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1412         }
1413 }
1414
1415 STATIC void
1416 xfs_free_bufhash(
1417         xfs_buftarg_t           *btp)
1418 {
1419         kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1420         btp->bt_hash = NULL;
1421 }
1422
1423 /*
1424  *      buftarg list for delwrite queue processing
1425  */
1426 STATIC LIST_HEAD(xfs_buftarg_list);
1427 STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
1428
1429 STATIC void
1430 xfs_register_buftarg(
1431         xfs_buftarg_t           *btp)
1432 {
1433         spin_lock(&xfs_buftarg_lock);
1434         list_add(&btp->bt_list, &xfs_buftarg_list);
1435         spin_unlock(&xfs_buftarg_lock);
1436 }
1437
1438 STATIC void
1439 xfs_unregister_buftarg(
1440         xfs_buftarg_t           *btp)
1441 {
1442         spin_lock(&xfs_buftarg_lock);
1443         list_del(&btp->bt_list);
1444         spin_unlock(&xfs_buftarg_lock);
1445 }
1446
1447 void
1448 xfs_free_buftarg(
1449         xfs_buftarg_t           *btp,
1450         int                     external)
1451 {
1452         xfs_flush_buftarg(btp, 1);
1453         if (external)
1454                 xfs_blkdev_put(btp->bt_bdev);
1455         xfs_free_bufhash(btp);
1456         iput(btp->bt_mapping->host);
1457
1458         /* Unregister the buftarg first so that we don't get a
1459          * wakeup finding a non-existent task
1460          */
1461         xfs_unregister_buftarg(btp);
1462         kthread_stop(btp->bt_task);
1463
1464         kmem_free(btp, sizeof(*btp));
1465 }
1466
1467 STATIC int
1468 xfs_setsize_buftarg_flags(
1469         xfs_buftarg_t           *btp,
1470         unsigned int            blocksize,
1471         unsigned int            sectorsize,
1472         int                     verbose)
1473 {
1474         btp->bt_bsize = blocksize;
1475         btp->bt_sshift = ffs(sectorsize) - 1;
1476         btp->bt_smask = sectorsize - 1;
1477
1478         if (set_blocksize(btp->bt_bdev, sectorsize)) {
1479                 printk(KERN_WARNING
1480                         "XFS: Cannot set_blocksize to %u on device %s\n",
1481                         sectorsize, XFS_BUFTARG_NAME(btp));
1482                 return EINVAL;
1483         }
1484
1485         if (verbose &&
1486             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1487                 printk(KERN_WARNING
1488                         "XFS: %u byte sectors in use on device %s.  "
1489                         "This is suboptimal; %u or greater is ideal.\n",
1490                         sectorsize, XFS_BUFTARG_NAME(btp),
1491                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1492         }
1493
1494         return 0;
1495 }
1496
1497 /*
1498  *      When allocating the initial buffer target we have not yet
1499  *      read in the superblock, so we don't know what size sectors
1500  *      are being used at this early stage.  Play safe.
1501  */
1502 STATIC int
1503 xfs_setsize_buftarg_early(
1504         xfs_buftarg_t           *btp,
1505         struct block_device     *bdev)
1506 {
1507         return xfs_setsize_buftarg_flags(btp,
1508                         PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1509 }
1510
1511 int
1512 xfs_setsize_buftarg(
1513         xfs_buftarg_t           *btp,
1514         unsigned int            blocksize,
1515         unsigned int            sectorsize)
1516 {
1517         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1518 }
1519
1520 STATIC int
1521 xfs_mapping_buftarg(
1522         xfs_buftarg_t           *btp,
1523         struct block_device     *bdev)
1524 {
1525         struct backing_dev_info *bdi;
1526         struct inode            *inode;
1527         struct address_space    *mapping;
1528         static const struct address_space_operations mapping_aops = {
1529                 .sync_page = block_sync_page,
1530                 .migratepage = fail_migrate_page,
1531         };
1532
1533         inode = new_inode(bdev->bd_inode->i_sb);
1534         if (!inode) {
1535                 printk(KERN_WARNING
1536                         "XFS: Cannot allocate mapping inode for device %s\n",
1537                         XFS_BUFTARG_NAME(btp));
1538                 return ENOMEM;
1539         }
1540         inode->i_mode = S_IFBLK;
1541         inode->i_bdev = bdev;
1542         inode->i_rdev = bdev->bd_dev;
1543         bdi = blk_get_backing_dev_info(bdev);
1544         if (!bdi)
1545                 bdi = &default_backing_dev_info;
1546         mapping = &inode->i_data;
1547         mapping->a_ops = &mapping_aops;
1548         mapping->backing_dev_info = bdi;
1549         mapping_set_gfp_mask(mapping, GFP_NOFS);
1550         btp->bt_mapping = mapping;
1551         return 0;
1552 }
1553
1554 STATIC int
1555 xfs_alloc_delwrite_queue(
1556         xfs_buftarg_t           *btp)
1557 {
1558         int     error = 0;
1559
1560         INIT_LIST_HEAD(&btp->bt_list);
1561         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1562         spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1563         btp->bt_flags = 0;
1564         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1565         if (IS_ERR(btp->bt_task)) {
1566                 error = PTR_ERR(btp->bt_task);
1567                 goto out_error;
1568         }
1569         xfs_register_buftarg(btp);
1570 out_error:
1571         return error;
1572 }
1573
1574 xfs_buftarg_t *
1575 xfs_alloc_buftarg(
1576         struct block_device     *bdev,
1577         int                     external)
1578 {
1579         xfs_buftarg_t           *btp;
1580
1581         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1582
1583         btp->bt_dev =  bdev->bd_dev;
1584         btp->bt_bdev = bdev;
1585         if (xfs_setsize_buftarg_early(btp, bdev))
1586                 goto error;
1587         if (xfs_mapping_buftarg(btp, bdev))
1588                 goto error;
1589         if (xfs_alloc_delwrite_queue(btp))
1590                 goto error;
1591         xfs_alloc_bufhash(btp, external);
1592         return btp;
1593
1594 error:
1595         kmem_free(btp, sizeof(*btp));
1596         return NULL;
1597 }
1598
1599
1600 /*
1601  *      Delayed write buffer handling
1602  */
1603 STATIC void
1604 xfs_buf_delwri_queue(
1605         xfs_buf_t               *bp,
1606         int                     unlock)
1607 {
1608         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1609         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1610
1611         XB_TRACE(bp, "delwri_q", (long)unlock);
1612         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1613
1614         spin_lock(dwlk);
1615         /* If already in the queue, dequeue and place at tail */
1616         if (!list_empty(&bp->b_list)) {
1617                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1618                 if (unlock)
1619                         atomic_dec(&bp->b_hold);
1620                 list_del(&bp->b_list);
1621         }
1622
1623         bp->b_flags |= _XBF_DELWRI_Q;
1624         list_add_tail(&bp->b_list, dwq);
1625         bp->b_queuetime = jiffies;
1626         spin_unlock(dwlk);
1627
1628         if (unlock)
1629                 xfs_buf_unlock(bp);
1630 }
1631
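/*
 *	Remove a buffer from its target's delayed write queue, dropping the
 *	queue's hold on the buffer if it was actually queued.
 */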
1632 void
1633 xfs_buf_delwri_dequeue(
1634         xfs_buf_t               *bp)
1635 {
1636         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1637         int                     dequeued = 0;
1638
1639         spin_lock(dwlk);
1640         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1641                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1642                 list_del_init(&bp->b_list);
1643                 dequeued = 1;
1644         }
1645         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1646         spin_unlock(dwlk);
1647
1648         if (dequeued)
1649                 xfs_buf_rele(bp);
1650
1651         XB_TRACE(bp, "delwri_dq", (long)dequeued);
1652 }
1653
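/*
 *	Wait for all work pending on the given buffer I/O completion
 *	workqueue to finish; used by xfs_flush_buftarg() before it walks
 *	the delayed write queue.
 */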
1654 STATIC void
1655 xfs_buf_runall_queues(
1656         struct workqueue_struct *queue)
1657 {
1658         flush_workqueue(queue);
1659 }
1660
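/*
 *	Memory shaker callback, registered in xfs_buf_init().  Under memory
 *	pressure, ask every buffer target's xfsbufd to flush its delayed
 *	write queue immediately; targets forced to sleep for the freezer
 *	are skipped.
 */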
1661 STATIC int
1662 xfsbufd_wakeup(
1663         int                     priority,
1664         gfp_t                   mask)
1665 {
1666         xfs_buftarg_t           *btp;
1667
1668         spin_lock(&xfs_buftarg_lock);
1669         list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1670                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1671                         continue;
1672                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1673                 wake_up_process(btp->bt_task);
1674         }
1675         spin_unlock(&xfs_buftarg_lock);
1676         return 0;
1677 }
1678
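/*
 *	Per-target delayed write flushing thread.  Wakes periodically and
 *	writes out queued buffers that are unpinned, lockable and older
 *	than xfs_buf_age_centisecs, or everything on the queue when a
 *	forced flush has been requested.
 */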
1679 STATIC int
1680 xfsbufd(
1681         void                    *data)
1682 {
1683         struct list_head        tmp;
1684         unsigned long           age;
1685         xfs_buftarg_t           *target = (xfs_buftarg_t *)data;
1686         xfs_buf_t               *bp, *n;
1687         struct list_head        *dwq = &target->bt_delwrite_queue;
1688         spinlock_t              *dwlk = &target->bt_delwrite_lock;
1689         int                     count;
1690
1691         current->flags |= PF_MEMALLOC;
1692
1693         INIT_LIST_HEAD(&tmp);
1694         do {
1695                 if (unlikely(freezing(current))) {
1696                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1697                         refrigerator();
1698                 } else {
1699                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1700                 }
1701
1702                 schedule_timeout_interruptible(
1703                         xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1704
1705                 count = 0;
1706                 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1707                 spin_lock(dwlk);
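                /*
                 * Buffers are queued at the tail in arrival order, so once
                 * we hit one that is still too young we can stop scanning,
                 * unless a forced flush has been requested.
                 */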
1708                 list_for_each_entry_safe(bp, n, dwq, b_list) {
1709                         XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1710                         ASSERT(bp->b_flags & XBF_DELWRI);
1711
1712                         if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1713                                 if (!test_bit(XBT_FORCE_FLUSH,
1714                                                 &target->bt_flags) &&
1715                                     time_before(jiffies,
1716                                                 bp->b_queuetime + age)) {
1717                                         xfs_buf_unlock(bp);
1718                                         break;
1719                                 }
1720
1721                                 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1722                                                  _XBF_RUN_QUEUES);
1723                                 bp->b_flags |= XBF_WRITE;
1724                                 list_move_tail(&bp->b_list, &tmp);
1725                                 count++;
1726                         }
1727                 }
1728                 spin_unlock(dwlk);
1729
1730                 while (!list_empty(&tmp)) {
1731                         bp = list_entry(tmp.next, xfs_buf_t, b_list);
1732                         ASSERT(target == bp->b_target);
1733
1734                         list_del_init(&bp->b_list);
1735                         xfs_buf_iostrategy(bp);
1736                 }
1737
1738                 if (as_list_len > 0)
1739                         purge_addresses();
1740                 if (count)
1741                         blk_run_address_space(target->bt_mapping);
1742
1743                 clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1744         } while (!kthread_should_stop());
1745
1746         return 0;
1747 }
1748
1749 /*
1750  *      Write out all delayed write buffers that belong to the given target,
1751  *      optionally waiting for I/O completion.  Pinned buffers are skipped and
1752  *      counted.  Used in error handling to keep on-disk metadata consistent.
1753  */
1754 int
1755 xfs_flush_buftarg(
1756         xfs_buftarg_t           *target,
1757         int                     wait)
1758 {
1759         struct list_head        tmp;
1760         xfs_buf_t               *bp, *n;
1761         int                     pincount = 0;
1762         struct list_head        *dwq = &target->bt_delwrite_queue;
1763         spinlock_t              *dwlk = &target->bt_delwrite_lock;
1764
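        /*
         * Drain any pending buffer I/O completion work first so that buffer
         * state is up to date before we walk the delayed write queue.
         */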
1765         xfs_buf_runall_queues(xfsdatad_workqueue);
1766         xfs_buf_runall_queues(xfslogd_workqueue);
1767
1768         INIT_LIST_HEAD(&tmp);
1769         spin_lock(dwlk);
1770         list_for_each_entry_safe(bp, n, dwq, b_list) {
1771                 ASSERT(bp->b_target == target);
1772                 ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
1773                 XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
1774                 if (xfs_buf_ispin(bp)) {
1775                         pincount++;
1776                         continue;
1777                 }
1778
1779                 list_move_tail(&bp->b_list, &tmp);
1780         }
1781         spin_unlock(dwlk);
1782
1783         /*
1784          * Dropped the delayed write list lock, now walk the temporary list
1785          */
1786         list_for_each_entry_safe(bp, n, &tmp, b_list) {
1787                 xfs_buf_lock(bp);
1788                 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
1789                 bp->b_flags |= XBF_WRITE;
1790                 if (wait)
1791                         bp->b_flags &= ~XBF_ASYNC;
1792                 else
1793                         list_del_init(&bp->b_list);
1794
1795                 xfs_buf_iostrategy(bp);
1796         }
1797
1798         if (wait)
1799                 blk_run_address_space(target->bt_mapping);
1800
1801         /*
1802          * Wait for I/O completion on the remaining items before returning
1803          */
1804         while (!list_empty(&tmp)) {
1805                 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1806
1807                 list_del_init(&bp->b_list);
1808                 xfs_iowait(bp);
1809                 xfs_buf_relse(bp);
1810         }
1811
1812         return pincount;
1813 }
1814
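/*
 *	Set up the buffer cache: trace buffer, buffer zone, the xfslogd and
 *	xfsdatad workqueues, and the memory shaker hook.
 */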
1815 int __init
1816 xfs_buf_init(void)
1817 {
1818 #ifdef XFS_BUF_TRACE
1819         xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
1820 #endif
1821
1822         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1823                                                 KM_ZONE_HWALIGN, NULL);
1824         if (!xfs_buf_zone)
1825                 goto out_free_trace_buf;
1826
1827         xfslogd_workqueue = create_workqueue("xfslogd");
1828         if (!xfslogd_workqueue)
1829                 goto out_free_buf_zone;
1830
1831         xfsdatad_workqueue = create_workqueue("xfsdatad");
1832         if (!xfsdatad_workqueue)
1833                 goto out_destroy_xfslogd_workqueue;
1834
1835         xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
1836         if (!xfs_buf_shake)
1837                 goto out_destroy_xfsdatad_workqueue;
1838
1839         return 0;
1840
1841  out_destroy_xfsdatad_workqueue:
1842         destroy_workqueue(xfsdatad_workqueue);
1843  out_destroy_xfslogd_workqueue:
1844         destroy_workqueue(xfslogd_workqueue);
1845  out_free_buf_zone:
1846         kmem_zone_destroy(xfs_buf_zone);
1847  out_free_trace_buf:
1848 #ifdef XFS_BUF_TRACE
1849         ktrace_free(xfs_buf_trace_buf);
1850 #endif
1851         return -ENOMEM;
1852 }
1853
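/*
 *	Undo everything xfs_buf_init() set up, in reverse order.
 */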
1854 void
1855 xfs_buf_terminate(void)
1856 {
1857         kmem_shake_deregister(xfs_buf_shake);
1858         destroy_workqueue(xfsdatad_workqueue);
1859         destroy_workqueue(xfslogd_workqueue);
1860         kmem_zone_destroy(xfs_buf_zone);
1861 #ifdef XFS_BUF_TRACE
1862         ktrace_free(xfs_buf_trace_buf);
1863 #endif
1864 }