2 * Copyright (C) 2009-2011 Red Hat, Inc.
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 * This file is released under the GPL.
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/shrinker.h>
16 #include <linux/module.h>
18 #define DM_MSG_PREFIX "bufio"
21 * Memory management policy:
22 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
23 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
24 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
25 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT dirty buffers.
28 #define DM_BUFIO_MIN_BUFFERS 8
30 #define DM_BUFIO_MEMORY_PERCENT 2
31 #define DM_BUFIO_VMALLOC_PERCENT 25
32 #define DM_BUFIO_WRITEBACK_PERCENT 75
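/*
 * Worked example (illustrative, not from the original source): on a machine
 * with 8 GiB of directly-mapped memory, DM_BUFIO_MEMORY_PERCENT limits the
 * default cache to about 2% of that, roughly 160 MiB, unless 25% of the
 * vmalloc area is smaller, in which case the smaller value wins.  Background
 * writeback starts once 75% of a client's buffer limit is dirty.
 */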
35 * Check buffer ages in this interval (seconds)
37 #define DM_BUFIO_WORK_TIMER_SECS 10
40 * Free buffers when they are older than this (seconds)
42 #define DM_BUFIO_DEFAULT_AGE_SECS 60
45 * The number of bvec entries that are embedded directly in the buffer.
46 * If the chunk size is larger, dm-io is used to do the I/O.
48 #define DM_BUFIO_INLINE_VECS 16
53 #define DM_BUFIO_HASH_BITS 20
54 #define DM_BUFIO_HASH(block) \
55 ((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
56 ((1 << DM_BUFIO_HASH_BITS) - 1))
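/*
 * The hash folds the bits above DM_BUFIO_HASH_BITS back into the low bits
 * before masking, so block numbers that differ only in their high bits do
 * not all land in the same bucket.
 */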
59 * Don't try to use kmem_cache_alloc for blocks larger than this.
60 * For explanation, see alloc_buffer_data below.
62 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
63 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
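/*
 * Illustrative numbers, assuming a common x86-64 configuration (4 KiB pages,
 * MAX_ORDER 11): the slab path covers blocks up to 2 KiB, __get_free_pages()
 * covers blocks up to 4 MiB, and anything larger falls back to vmalloc.
 */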
66 * dm_buffer->list_mode
74 * All buffers are linked to cache_hash with their hash_list field.
76 * Clean buffers that are not being written (B_WRITING not set)
77 * are linked to lru[LIST_CLEAN] with their lru_list field.
79 * Dirty and clean buffers that are being written are linked to
80 * lru[LIST_DIRTY] with their lru_list field. When the write
81 * finishes, the buffer cannot be relinked immediately (because we
82 * are in an interrupt context and relinking requires process
83 * context), so some clean-not-writing buffers can be held on
84 * dirty_lru too. They are later added to lru in the process context.
87 struct dm_bufio_client {
90 struct list_head lru[LIST_SIZE];
91 unsigned long n_buffers[LIST_SIZE];
93 struct block_device *bdev;
95 unsigned char sectors_per_block_bits;
96 unsigned char pages_per_block_bits;
97 unsigned char blocks_per_page_bits;
99 void (*alloc_callback)(struct dm_buffer *);
100 void (*write_callback)(struct dm_buffer *);
102 struct dm_io_client *dm_io;
104 struct list_head reserved_buffers;
105 unsigned need_reserved_buffers;
107 unsigned minimum_buffers;
109 struct hlist_head *cache_hash;
110 wait_queue_head_t free_buffer_wait;
112 int async_write_error;
114 struct list_head client_list;
115 struct shrinker shrinker;
126 * Describes how the block was allocated:
127 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
128 * See the comment at alloc_buffer_data.
132 DATA_MODE_GET_FREE_PAGES = 1,
133 DATA_MODE_VMALLOC = 2,
138 struct hlist_node hash_list;
139 struct list_head lru_list;
142 enum data_mode data_mode;
143 unsigned char list_mode; /* LIST_* */
148 unsigned long last_accessed;
149 struct dm_bufio_client *c;
150 struct list_head write_list;
152 struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
155 /*----------------------------------------------------------------*/
157 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
158 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
160 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
162 unsigned ret = c->blocks_per_page_bits - 1;
164 BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
169 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
170 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])
172 #define dm_bufio_in_request() (!!current->bio_list)
174 static void dm_bufio_lock(struct dm_bufio_client *c)
176 mutex_lock_nested(&c->lock, dm_bufio_in_request());
179 static int dm_bufio_trylock(struct dm_bufio_client *c)
181 return mutex_trylock(&c->lock);
184 static void dm_bufio_unlock(struct dm_bufio_client *c)
186 mutex_unlock(&c->lock);
190 * FIXME Move to sched.h?
192 #ifdef CONFIG_PREEMPT_VOLUNTARY
193 # define dm_bufio_cond_resched() \
194 do { \
195 if (unlikely(need_resched())) \
196 _cond_resched(); \
197 } while (0)
198 #else
199 # define dm_bufio_cond_resched() do { } while (0)
200 #endif
202 /*----------------------------------------------------------------*/
205 * Default cache size: available memory divided by the ratio.
207 static unsigned long dm_bufio_default_cache_size;
210 * Total cache size set by the user.
212 static unsigned long dm_bufio_cache_size;
215 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
216 * at any time. If it disagrees, the user has changed cache size.
218 static unsigned long dm_bufio_cache_size_latch;
220 static DEFINE_SPINLOCK(param_spinlock);
223 * Buffers are freed after this timeout
225 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
227 static unsigned long dm_bufio_peak_allocated;
228 static unsigned long dm_bufio_allocated_kmem_cache;
229 static unsigned long dm_bufio_allocated_get_free_pages;
230 static unsigned long dm_bufio_allocated_vmalloc;
231 static unsigned long dm_bufio_current_allocated;
233 /*----------------------------------------------------------------*/
236 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
238 static unsigned long dm_bufio_cache_size_per_client;
241 * The current number of clients.
243 static int dm_bufio_client_count;
246 * The list of all clients.
248 static LIST_HEAD(dm_bufio_all_clients);
251 * This mutex protects dm_bufio_cache_size_latch,
252 * dm_bufio_cache_size_per_client and dm_bufio_client_count
254 static DEFINE_MUTEX(dm_bufio_clients_lock);
256 /*----------------------------------------------------------------*/
258 static void adjust_total_allocated(enum data_mode data_mode, long diff)
260 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
261 &dm_bufio_allocated_kmem_cache,
262 &dm_bufio_allocated_get_free_pages,
263 &dm_bufio_allocated_vmalloc,
266 spin_lock(&param_spinlock);
268 *class_ptr[data_mode] += diff;
270 dm_bufio_current_allocated += diff;
272 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
273 dm_bufio_peak_allocated = dm_bufio_current_allocated;
275 spin_unlock(&param_spinlock);
279 * Change the number of clients and recalculate per-client limit.
281 static void __cache_size_refresh(void)
283 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
284 BUG_ON(dm_bufio_client_count < 0);
286 dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
289 * Use default if set to 0 and report the actual cache size used.
291 if (!dm_bufio_cache_size_latch) {
292 (void)cmpxchg(&dm_bufio_cache_size, 0,
293 dm_bufio_default_cache_size);
294 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
297 dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
298 (dm_bufio_client_count ? : 1);
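/*
 * The "? : 1" guards against division by zero while no client is
 * registered; the per-client share is recomputed as clients come and go.
 */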
302 * Allocating buffer data.
304 * Small buffers are allocated with kmem_cache, to use space optimally.
306 * For large buffers, we choose between get_free_pages and vmalloc.
307 * Each has advantages and disadvantages.
309 * __get_free_pages can randomly fail if the memory is fragmented.
310 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
311 * as low as 128M) so using it for caching is not appropriate.
313 * If the allocation may fail we use __get_free_pages. Memory fragmentation
314 * won't have a fatal effect here; it just causes some other buffers to be
315 * flushed and more I/O to be performed. Don't use __get_free_pages if it
316 * always fails (i.e. order >= MAX_ORDER).
318 * If the allocation shouldn't fail we use __vmalloc. This is only for the
319 * initial reserve allocation, so there's no risk of wasting all vmalloc space.
322 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
323 enum data_mode *data_mode)
328 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
329 *data_mode = DATA_MODE_SLAB;
330 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
333 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
334 gfp_mask & __GFP_NORETRY) {
335 *data_mode = DATA_MODE_GET_FREE_PAGES;
336 return (void *)__get_free_pages(gfp_mask,
337 c->pages_per_block_bits);
340 *data_mode = DATA_MODE_VMALLOC;
343 * __vmalloc allocates the data pages and auxiliary structures with
344 * gfp_flags that were specified, but pagetables are always allocated
345 * with GFP_KERNEL, no matter what was specified as gfp_mask.
347 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
348 * all allocations done by this process (including pagetables) are done
349 * as if GFP_NOIO was specified.
352 if (gfp_mask & __GFP_NORETRY)
353 noio_flag = memalloc_noio_save();
355 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
357 if (gfp_mask & __GFP_NORETRY)
358 memalloc_noio_restore(noio_flag);
364 * Free buffer's data.
366 static void free_buffer_data(struct dm_bufio_client *c,
367 void *data, enum data_mode data_mode)
371 kmem_cache_free(DM_BUFIO_CACHE(c), data);
374 case DATA_MODE_GET_FREE_PAGES:
375 free_pages((unsigned long)data, c->pages_per_block_bits);
378 case DATA_MODE_VMALLOC:
383 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
390 * Allocate buffer and its data.
392 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
394 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
402 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
408 adjust_total_allocated(b->data_mode, (long)c->block_size);
414 * Free buffer and its data.
416 static void free_buffer(struct dm_buffer *b)
418 struct dm_bufio_client *c = b->c;
420 adjust_total_allocated(b->data_mode, -(long)c->block_size);
422 free_buffer_data(c, b->data, b->data_mode);
427 * Link buffer to the hash list and clean or dirty queue.
429 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
431 struct dm_bufio_client *c = b->c;
433 c->n_buffers[dirty]++;
435 b->list_mode = dirty;
436 list_add(&b->lru_list, &c->lru[dirty]);
437 hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
438 b->last_accessed = jiffies;
442 * Unlink buffer from the hash list and dirty or clean queue.
444 static void __unlink_buffer(struct dm_buffer *b)
446 struct dm_bufio_client *c = b->c;
448 BUG_ON(!c->n_buffers[b->list_mode]);
450 c->n_buffers[b->list_mode]--;
451 hlist_del(&b->hash_list);
452 list_del(&b->lru_list);
456 * Place the buffer at the head of the dirty or clean LRU queue.
458 static void __relink_lru(struct dm_buffer *b, int dirty)
460 struct dm_bufio_client *c = b->c;
462 BUG_ON(!c->n_buffers[b->list_mode]);
464 c->n_buffers[b->list_mode]--;
465 c->n_buffers[dirty]++;
466 b->list_mode = dirty;
467 list_move(&b->lru_list, &c->lru[dirty]);
470 /*----------------------------------------------------------------
471 * Submit I/O on the buffer.
473 * Bio interface is faster but it has some problems:
474 * the vector list is limited (increasing this limit increases
475 * memory-consumption per buffer, so it is not viable);
477 * the memory must be direct-mapped, not vmalloced;
479 * the I/O driver can reject requests spuriously if it thinks that
480 * the requests are too big for the device or if they cross a
481 * controller-defined memory boundary.
483 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
484 * it is not vmalloced, try using the bio interface.
486 * If the buffer is big, if it is vmalloced or if the underlying device
487 * rejects the bio because it is too large, use the dm-io layer to do the I/O.
488 * The dm-io layer splits the I/O into multiple requests, avoiding the above shortcomings.
490 *--------------------------------------------------------------*/
493 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
494 * that the request was handled directly with bio interface.
496 static void dmio_complete(unsigned long error, void *context)
498 struct dm_buffer *b = context;
500 b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
503 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
504 bio_end_io_t *end_io)
507 struct dm_io_request io_req = {
509 .notify.fn = dmio_complete,
511 .client = b->c->dm_io,
513 struct dm_io_region region = {
515 .sector = block << b->c->sectors_per_block_bits,
516 .count = b->c->block_size >> SECTOR_SHIFT,
519 if (b->data_mode != DATA_MODE_VMALLOC) {
520 io_req.mem.type = DM_IO_KMEM;
521 io_req.mem.ptr.addr = b->data;
523 io_req.mem.type = DM_IO_VMA;
524 io_req.mem.ptr.vma = b->data;
527 b->bio.bi_end_io = end_io;
529 r = dm_io(&io_req, 1, &region, NULL);
534 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
535 bio_end_io_t *end_io)
541 b->bio.bi_io_vec = b->bio_vec;
542 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
543 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
544 b->bio.bi_bdev = b->c->bdev;
545 b->bio.bi_end_io = end_io;
548 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
549 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
552 len = b->c->block_size;
554 if (len >= PAGE_SIZE)
555 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
557 BUG_ON((unsigned long)ptr & (len - 1));
560 if (!bio_add_page(&b->bio, virt_to_page(ptr),
561 len < PAGE_SIZE ? len : PAGE_SIZE,
562 virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
563 BUG_ON(b->c->block_size <= PAGE_SIZE);
564 use_dmio(b, rw, block, end_io);
572 submit_bio(rw, &b->bio);
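/*
 * submit_io() below picks the path: buffers no larger than
 * DM_BUFIO_INLINE_VECS pages that are not vmalloc-backed use the inline bio
 * above, everything else is handed to dm-io via use_dmio().
 */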
575 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
576 bio_end_io_t *end_io)
578 if (rw == WRITE && b->c->write_callback)
579 b->c->write_callback(b);
581 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
582 b->data_mode != DATA_MODE_VMALLOC)
583 use_inline_bio(b, rw, block, end_io);
585 use_dmio(b, rw, block, end_io);
588 /*----------------------------------------------------------------
589 * Writing dirty buffers
590 *--------------------------------------------------------------*/
593 * The endio routine for write.
595 * Set the error, clear the B_WRITING bit and wake anyone who was waiting on the buffer.
598 static void write_endio(struct bio *bio, int error)
600 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
602 b->write_error = error;
603 if (unlikely(error)) {
604 struct dm_bufio_client *c = b->c;
605 (void)cmpxchg(&c->async_write_error, 0, error);
608 BUG_ON(!test_bit(B_WRITING, &b->state));
610 smp_mb__before_atomic();
611 clear_bit(B_WRITING, &b->state);
612 smp_mb__after_atomic();
614 wake_up_bit(&b->state, B_WRITING);
618 * Initiate a write on a dirty buffer, but don't wait for it.
620 * - If the buffer is not dirty, exit.
621 * - If there is a previous write in progress, wait for it to finish (we can't
622 * have two writes on the same buffer simultaneously).
623 * - Submit our write and don't wait on it. We set B_WRITING indicating
624 * that there is a write in progress.
626 static void __write_dirty_buffer(struct dm_buffer *b,
627 struct list_head *write_list)
629 if (!test_bit(B_DIRTY, &b->state))
632 clear_bit(B_DIRTY, &b->state);
633 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
635 if (!write_list)
636 submit_io(b, WRITE, b->block, write_endio);
637 else
638 list_add_tail(&b->write_list, write_list);
641 static void __flush_write_list(struct list_head *write_list)
643 struct blk_plug plug;
644 blk_start_plug(&plug);
645 while (!list_empty(write_list)) {
646 struct dm_buffer *b =
647 list_entry(write_list->next, struct dm_buffer, write_list);
648 list_del(&b->write_list);
649 submit_io(b, WRITE, b->block, write_endio);
650 dm_bufio_cond_resched();
652 blk_finish_plug(&plug);
656 * Wait until any activity on the buffer finishes. Possibly write the
657 * buffer if it is dirty. When this function finishes, there is no I/O
658 * running on the buffer and the buffer is not dirty.
660 static void __make_buffer_clean(struct dm_buffer *b)
662 BUG_ON(b->hold_count);
664 if (!b->state) /* fast case */
667 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
668 __write_dirty_buffer(b, NULL);
669 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
673 * Find some buffer that is not held by anybody, clean it, unlink it and return it.
676 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
680 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
681 BUG_ON(test_bit(B_WRITING, &b->state));
682 BUG_ON(test_bit(B_DIRTY, &b->state));
684 if (!b->hold_count) {
685 __make_buffer_clean(b);
689 dm_bufio_cond_resched();
692 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
693 BUG_ON(test_bit(B_READING, &b->state));
695 if (!b->hold_count) {
696 __make_buffer_clean(b);
700 dm_bufio_cond_resched();
707 * Wait until some other threads free some buffer or release hold count on some buffer.
710 * This function is entered with c->lock held, drops it and regains it before exiting.
713 static void __wait_for_free_buffer(struct dm_bufio_client *c)
715 DECLARE_WAITQUEUE(wait, current);
717 add_wait_queue(&c->free_buffer_wait, &wait);
718 set_task_state(current, TASK_UNINTERRUPTIBLE);
723 remove_wait_queue(&c->free_buffer_wait, &wait);
736 * Allocate a new buffer. If the allocation is not possible, wait until
737 * some other thread frees a buffer.
739 * May drop the lock and regain it.
741 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
746 * dm-bufio is resistant to allocation failures (it just keeps
747 * one buffer reserved in case all allocations fail).
748 * So set flags to not try too hard:
749 * GFP_NOIO: don't recurse into the I/O layer
750 * __GFP_NORETRY: don't retry and rather return failure
751 * __GFP_NOMEMALLOC: don't use emergency reserves
752 * __GFP_NOWARN: don't print a warning in case of failure
754 * For debugging, if we set the cache size to 1, no new buffers will be allocated.
758 if (dm_bufio_cache_size_latch != 1) {
759 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
764 if (nf == NF_PREFETCH)
767 if (!list_empty(&c->reserved_buffers)) {
768 b = list_entry(c->reserved_buffers.next,
769 struct dm_buffer, lru_list);
770 list_del(&b->lru_list);
771 c->need_reserved_buffers++;
776 b = __get_unclaimed_buffer(c);
780 __wait_for_free_buffer(c);
784 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
786 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
791 if (c->alloc_callback)
792 c->alloc_callback(b);
798 * Free a buffer and wake other threads waiting for free buffers.
800 static void __free_buffer_wake(struct dm_buffer *b)
802 struct dm_bufio_client *c = b->c;
804 if (!c->need_reserved_buffers)
807 list_add(&b->lru_list, &c->reserved_buffers);
808 c->need_reserved_buffers--;
811 wake_up(&c->free_buffer_wait);
814 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
815 struct list_head *write_list)
817 struct dm_buffer *b, *tmp;
819 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
820 BUG_ON(test_bit(B_READING, &b->state));
822 if (!test_bit(B_DIRTY, &b->state) &&
823 !test_bit(B_WRITING, &b->state)) {
824 __relink_lru(b, LIST_CLEAN);
828 if (no_wait && test_bit(B_WRITING, &b->state))
831 __write_dirty_buffer(b, write_list);
832 dm_bufio_cond_resched();
837 * Get writeback threshold and buffer limit for a given client.
839 static void __get_memory_limit(struct dm_bufio_client *c,
840 unsigned long *threshold_buffers,
841 unsigned long *limit_buffers)
843 unsigned long buffers;
845 if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
846 mutex_lock(&dm_bufio_clients_lock);
847 __cache_size_refresh();
848 mutex_unlock(&dm_bufio_clients_lock);
851 buffers = dm_bufio_cache_size_per_client >>
852 (c->sectors_per_block_bits + SECTOR_SHIFT);
854 if (buffers < c->minimum_buffers)
855 buffers = c->minimum_buffers;
857 *limit_buffers = buffers;
858 *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
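/*
 * Worked example for the limits computed above (illustrative): with a
 * 100 MiB per-client share and 4 KiB blocks this gives a limit of 25600
 * buffers and a writeback threshold of 19200 dirty buffers (75%).
 */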
862 * Check if we're over the watermark.
863 * If we are over threshold_buffers, start freeing buffers.
864 * If we're over "limit_buffers", block until we get under the limit.
866 static void __check_watermark(struct dm_bufio_client *c,
867 struct list_head *write_list)
869 unsigned long threshold_buffers, limit_buffers;
871 __get_memory_limit(c, &threshold_buffers, &limit_buffers);
873 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
876 struct dm_buffer *b = __get_unclaimed_buffer(c);
881 __free_buffer_wake(b);
882 dm_bufio_cond_resched();
885 if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
886 __write_dirty_buffers_async(c, 1, write_list);
890 * Find a buffer in the hash.
892 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
896 hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
898 dm_bufio_cond_resched();
899 if (b->block == block)
906 /*----------------------------------------------------------------
908 *--------------------------------------------------------------*/
910 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
911 enum new_flag nf, int *need_submit,
912 struct list_head *write_list)
914 struct dm_buffer *b, *new_b = NULL;
918 b = __find(c, block);
925 new_b = __alloc_buffer_wait(c, nf);
930 * We've had a period where the mutex was unlocked, so we need to
931 * recheck the hash table.
933 b = __find(c, block);
935 __free_buffer_wake(new_b);
939 __check_watermark(c, write_list);
945 __link_buffer(b, block, LIST_CLEAN);
947 if (nf == NF_FRESH) {
952 b->state = 1 << B_READING;
958 if (nf == NF_PREFETCH)
961 * Note: it is essential that we don't wait for the buffer to be
962 * read if dm_bufio_get function is used. Both dm_bufio_get and
963 * dm_bufio_prefetch can be used in the driver request routine.
964 * If the user called both dm_bufio_prefetch and dm_bufio_get on
965 * the same buffer, it would deadlock if we waited.
967 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
971 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
972 test_bit(B_WRITING, &b->state));
977 * The endio routine for reading: set the error, clear the bit and wake up
978 * anyone waiting on the buffer.
980 static void read_endio(struct bio *bio, int error)
982 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
984 b->read_error = error;
986 BUG_ON(!test_bit(B_READING, &b->state));
988 smp_mb__before_atomic();
989 clear_bit(B_READING, &b->state);
990 smp_mb__after_atomic();
992 wake_up_bit(&b->state, B_READING);
996 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
997 * functions is similar except that dm_bufio_new doesn't read the
998 * buffer from the disk (assuming that the caller overwrites all the data
999 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1001 static void *new_read(struct dm_bufio_client *c, sector_t block,
1002 enum new_flag nf, struct dm_buffer **bp)
1005 struct dm_buffer *b;
1007 LIST_HEAD(write_list);
1010 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1013 __flush_write_list(&write_list);
1019 submit_io(b, READ, b->block, read_endio);
1021 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1023 if (b->read_error) {
1024 int error = b->read_error;
1026 dm_bufio_release(b);
1028 return ERR_PTR(error);
1036 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1037 struct dm_buffer **bp)
1039 return new_read(c, block, NF_GET, bp);
1041 EXPORT_SYMBOL_GPL(dm_bufio_get);
1043 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1044 struct dm_buffer **bp)
1046 BUG_ON(dm_bufio_in_request());
1048 return new_read(c, block, NF_READ, bp);
1050 EXPORT_SYMBOL_GPL(dm_bufio_read);
1052 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1053 struct dm_buffer **bp)
1055 BUG_ON(dm_bufio_in_request());
1057 return new_read(c, block, NF_FRESH, bp);
1059 EXPORT_SYMBOL_GPL(dm_bufio_new);
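/*
 * Illustrative sketch (not part of the original file): a typical caller does
 * something like
 *
 *	struct dm_buffer *b;
 *	void *data = dm_bufio_read(c, block, &b);
 *	if (!IS_ERR(data)) {
 *		memcpy(data, payload, len);	(modify the cached block)
 *		dm_bufio_mark_buffer_dirty(b);
 *		dm_bufio_release(b);
 *	}
 *
 * and later calls dm_bufio_write_dirty_buffers(c) to commit the changes;
 * "payload" and "len" are placeholders supplied by the caller.
 */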
1061 void dm_bufio_prefetch(struct dm_bufio_client *c,
1062 sector_t block, unsigned n_blocks)
1064 struct blk_plug plug;
1066 LIST_HEAD(write_list);
1068 BUG_ON(dm_bufio_in_request());
1070 blk_start_plug(&plug);
1073 for (; n_blocks--; block++) {
1075 struct dm_buffer *b;
1076 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1078 if (unlikely(!list_empty(&write_list))) {
1080 blk_finish_plug(&plug);
1081 __flush_write_list(&write_list);
1082 blk_start_plug(&plug);
1085 if (unlikely(b != NULL)) {
1089 submit_io(b, READ, b->block, read_endio);
1090 dm_bufio_release(b);
1092 dm_bufio_cond_resched();
1103 blk_finish_plug(&plug);
1105 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1107 void dm_bufio_release(struct dm_buffer *b)
1109 struct dm_bufio_client *c = b->c;
1113 BUG_ON(!b->hold_count);
1116 if (!b->hold_count) {
1117 wake_up(&c->free_buffer_wait);
1120 * If there were errors on the buffer, and the buffer is not
1121 * to be written, free the buffer. There is no point in caching
1124 if ((b->read_error || b->write_error) &&
1125 !test_bit(B_READING, &b->state) &&
1126 !test_bit(B_WRITING, &b->state) &&
1127 !test_bit(B_DIRTY, &b->state)) {
1129 __free_buffer_wake(b);
1135 EXPORT_SYMBOL_GPL(dm_bufio_release);
1137 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1139 struct dm_bufio_client *c = b->c;
1143 BUG_ON(test_bit(B_READING, &b->state));
1145 if (!test_and_set_bit(B_DIRTY, &b->state))
1146 __relink_lru(b, LIST_DIRTY);
1150 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1152 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1154 LIST_HEAD(write_list);
1156 BUG_ON(dm_bufio_in_request());
1159 __write_dirty_buffers_async(c, 0, &write_list);
1161 __flush_write_list(&write_list);
1163 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1166 * For performance, it is essential that the buffers are written asynchronously
1167 * and simultaneously (so that the block layer can merge the writes) and then waited upon.
1170 * Finally, we flush the hardware disk cache.
1172 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1175 unsigned long buffers_processed = 0;
1176 struct dm_buffer *b, *tmp;
1178 LIST_HEAD(write_list);
1181 __write_dirty_buffers_async(c, 0, &write_list);
1183 __flush_write_list(&write_list);
1187 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1188 int dropped_lock = 0;
1190 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1191 buffers_processed++;
1193 BUG_ON(test_bit(B_READING, &b->state));
1195 if (test_bit(B_WRITING, &b->state)) {
1196 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1200 wait_on_bit_io(&b->state, B_WRITING,
1201 TASK_UNINTERRUPTIBLE);
1205 wait_on_bit_io(&b->state, B_WRITING,
1206 TASK_UNINTERRUPTIBLE);
1209 if (!test_bit(B_DIRTY, &b->state) &&
1210 !test_bit(B_WRITING, &b->state))
1211 __relink_lru(b, LIST_CLEAN);
1213 dm_bufio_cond_resched();
1216 * If we dropped the lock, the list is no longer consistent,
1217 * so we must restart the search.
1219 * In the most common case, the buffer just processed is
1220 * relinked to the clean list, so we won't loop scanning the
1221 * same buffer again and again.
1223 * This may livelock if there is another thread simultaneously
1224 * dirtying buffers, so we count the number of buffers walked
1225 * and if it exceeds the total number of buffers, it means that
1226 * someone is doing some writes simultaneously with us. In
1227 * this case, stop, dropping the lock.
1232 wake_up(&c->free_buffer_wait);
1235 a = xchg(&c->async_write_error, 0);
1236 f = dm_bufio_issue_flush(c);
1242 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1245 * Use dm-io to send an empty barrier and flush the device.
1247 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1249 struct dm_io_request io_req = {
1250 .bi_rw = WRITE_FLUSH,
1251 .mem.type = DM_IO_KMEM,
1252 .mem.ptr.addr = NULL,
1255 struct dm_io_region io_reg = {
1261 BUG_ON(dm_bufio_in_request());
1263 return dm_io(&io_req, 1, &io_reg, NULL);
1265 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1268 * We first delete any other buffer that may be at that new location.
1270 * Then, we write the buffer to the original location if it was dirty.
1272 * Then, if we are the only one who is holding the buffer, relink the buffer
1273 * in the hash queue for the new location.
1275 * If there was someone else holding the buffer, we write it to the new
1276 * location but not relink it, because that other user needs to have the buffer
1277 * at the same place.
1279 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1281 struct dm_bufio_client *c = b->c;
1282 struct dm_buffer *new;
1284 BUG_ON(dm_bufio_in_request());
1289 new = __find(c, new_block);
1291 if (new->hold_count) {
1292 __wait_for_free_buffer(c);
1297 * FIXME: Is there any point waiting for a write that's going
1298 * to be overwritten in a bit?
1300 __make_buffer_clean(new);
1301 __unlink_buffer(new);
1302 __free_buffer_wake(new);
1305 BUG_ON(!b->hold_count);
1306 BUG_ON(test_bit(B_READING, &b->state));
1308 __write_dirty_buffer(b, NULL);
1309 if (b->hold_count == 1) {
1310 wait_on_bit_io(&b->state, B_WRITING,
1311 TASK_UNINTERRUPTIBLE);
1312 set_bit(B_DIRTY, &b->state);
1314 __link_buffer(b, new_block, LIST_DIRTY);
1317 wait_on_bit_lock_io(&b->state, B_WRITING,
1318 TASK_UNINTERRUPTIBLE);
1320 * Relink buffer to "new_block" so that write_callback
1321 * sees "new_block" as a block number.
1322 * After the write, link the buffer back to old_block.
1323 * All this must be done in bufio lock, so that block number
1324 * change isn't visible to other threads.
1326 old_block = b->block;
1328 __link_buffer(b, new_block, b->list_mode);
1329 submit_io(b, WRITE, new_block, write_endio);
1330 wait_on_bit_io(&b->state, B_WRITING,
1331 TASK_UNINTERRUPTIBLE);
1333 __link_buffer(b, old_block, b->list_mode);
1337 dm_bufio_release(b);
1339 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1342 * Free the given buffer.
1344 * This is just a hint: if the buffer is in use or dirty, this function does nothing.
1347 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1349 struct dm_buffer *b;
1353 b = __find(c, block);
1354 if (b && likely(!b->hold_count) && likely(!b->state)) {
1356 __free_buffer_wake(b);
1361 EXPORT_SYMBOL(dm_bufio_forget);
1363 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1365 c->minimum_buffers = n;
1367 EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1369 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1371 return c->block_size;
1373 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1375 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1377 return i_size_read(c->bdev->bd_inode) >>
1378 (SECTOR_SHIFT + c->sectors_per_block_bits);
1380 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
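/*
 * For example (illustrative figures), a 1 GiB device used with 4 KiB blocks
 * reports a size of 262144 blocks here: i_size shifted down by SECTOR_SHIFT
 * plus sectors_per_block_bits.
 */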
1382 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1386 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1388 void *dm_bufio_get_block_data(struct dm_buffer *b)
1392 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1394 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1398 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1400 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1404 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1406 static void drop_buffers(struct dm_bufio_client *c)
1408 struct dm_buffer *b;
1411 BUG_ON(dm_bufio_in_request());
1414 * An optimization so that the buffers are not written one-by-one.
1416 dm_bufio_write_dirty_buffers_async(c);
1420 while ((b = __get_unclaimed_buffer(c)))
1421 __free_buffer_wake(b);
1423 for (i = 0; i < LIST_SIZE; i++)
1424 list_for_each_entry(b, &c->lru[i], lru_list)
1425 DMERR("leaked buffer %llx, hold count %u, list %d",
1426 (unsigned long long)b->block, b->hold_count, i);
1428 for (i = 0; i < LIST_SIZE; i++)
1429 BUG_ON(!list_empty(&c->lru[i]));
1435 * Test if the buffer is unused and too old, and commit it.
1436 * And if noio is set, we must not do any I/O because we hold
1437 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
1438 * a different bufio client.
1440 static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1441 unsigned long max_jiffies)
1443 if (jiffies - b->last_accessed < max_jiffies)
1446 if (!(gfp & __GFP_IO)) {
1447 if (test_bit(B_READING, &b->state) ||
1448 test_bit(B_WRITING, &b->state) ||
1449 test_bit(B_DIRTY, &b->state))
1456 __make_buffer_clean(b);
1458 __free_buffer_wake(b);
1463 static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1467 struct dm_buffer *b, *tmp;
1470 for (l = 0; l < LIST_SIZE; l++) {
1471 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1472 freed += __cleanup_old_buffer(b, gfp_mask, 0);
1476 dm_bufio_cond_resched();
1481 static unsigned long
1482 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1484 struct dm_bufio_client *c;
1485 unsigned long freed;
1487 c = container_of(shrink, struct dm_bufio_client, shrinker);
1488 if (sc->gfp_mask & __GFP_IO)
1490 else if (!dm_bufio_trylock(c))
1493 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1498 static unsigned long
1499 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1501 struct dm_bufio_client *c;
1502 unsigned long count;
1504 c = container_of(shrink, struct dm_bufio_client, shrinker);
1505 if (sc->gfp_mask & __GFP_IO)
1507 else if (!dm_bufio_trylock(c))
1510 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1516 * Create the buffering interface
1518 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1519 unsigned reserved_buffers, unsigned aux_size,
1520 void (*alloc_callback)(struct dm_buffer *),
1521 void (*write_callback)(struct dm_buffer *))
1524 struct dm_bufio_client *c;
1527 BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1528 (block_size & (block_size - 1)));
1530 c = kzalloc(sizeof(*c), GFP_KERNEL);
1535 c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
1536 if (!c->cache_hash) {
1542 c->block_size = block_size;
1543 c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
1544 c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
1545 ffs(block_size) - 1 - PAGE_SHIFT : 0;
1546 c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
1547 PAGE_SHIFT - (ffs(block_size) - 1) : 0);
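/*
 * For example, with 4 KiB pages (PAGE_SHIFT 12): block_size 4096 gives
 * sectors_per_block_bits = 3, pages_per_block_bits = 0 and
 * blocks_per_page_bits = 0; block_size 512 gives sectors_per_block_bits = 0
 * and blocks_per_page_bits = 3.
 */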
1549 c->aux_size = aux_size;
1550 c->alloc_callback = alloc_callback;
1551 c->write_callback = write_callback;
1553 for (i = 0; i < LIST_SIZE; i++) {
1554 INIT_LIST_HEAD(&c->lru[i]);
1555 c->n_buffers[i] = 0;
1558 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1559 INIT_HLIST_HEAD(&c->cache_hash[i]);
1561 mutex_init(&c->lock);
1562 INIT_LIST_HEAD(&c->reserved_buffers);
1563 c->need_reserved_buffers = reserved_buffers;
1565 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1567 init_waitqueue_head(&c->free_buffer_wait);
1568 c->async_write_error = 0;
1570 c->dm_io = dm_io_client_create();
1571 if (IS_ERR(c->dm_io)) {
1572 r = PTR_ERR(c->dm_io);
1576 mutex_lock(&dm_bufio_clients_lock);
1577 if (c->blocks_per_page_bits) {
1578 if (!DM_BUFIO_CACHE_NAME(c)) {
1579 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1580 if (!DM_BUFIO_CACHE_NAME(c)) {
1582 mutex_unlock(&dm_bufio_clients_lock);
1587 if (!DM_BUFIO_CACHE(c)) {
1588 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1590 c->block_size, 0, NULL);
1591 if (!DM_BUFIO_CACHE(c)) {
1593 mutex_unlock(&dm_bufio_clients_lock);
1598 mutex_unlock(&dm_bufio_clients_lock);
1600 while (c->need_reserved_buffers) {
1601 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1607 __free_buffer_wake(b);
1610 mutex_lock(&dm_bufio_clients_lock);
1611 dm_bufio_client_count++;
1612 list_add(&c->client_list, &dm_bufio_all_clients);
1613 __cache_size_refresh();
1614 mutex_unlock(&dm_bufio_clients_lock);
1616 c->shrinker.count_objects = dm_bufio_shrink_count;
1617 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1618 c->shrinker.seeks = 1;
1619 c->shrinker.batch = 0;
1620 register_shrinker(&c->shrinker);
1626 while (!list_empty(&c->reserved_buffers)) {
1627 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1628 struct dm_buffer, lru_list);
1629 list_del(&b->lru_list);
1632 dm_io_client_destroy(c->dm_io);
1634 vfree(c->cache_hash);
1640 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1643 * Free the buffering interface.
1644 * It is required that there are no references on any buffers.
1646 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1652 unregister_shrinker(&c->shrinker);
1654 mutex_lock(&dm_bufio_clients_lock);
1656 list_del(&c->client_list);
1657 dm_bufio_client_count--;
1658 __cache_size_refresh();
1660 mutex_unlock(&dm_bufio_clients_lock);
1662 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1663 BUG_ON(!hlist_empty(&c->cache_hash[i]));
1665 BUG_ON(c->need_reserved_buffers);
1667 while (!list_empty(&c->reserved_buffers)) {
1668 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1669 struct dm_buffer, lru_list);
1670 list_del(&b->lru_list);
1674 for (i = 0; i < LIST_SIZE; i++)
1675 if (c->n_buffers[i])
1676 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1678 for (i = 0; i < LIST_SIZE; i++)
1679 BUG_ON(c->n_buffers[i]);
1681 dm_io_client_destroy(c->dm_io);
1682 vfree(c->cache_hash);
1685 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1687 static void cleanup_old_buffers(void)
1689 unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
1690 struct dm_bufio_client *c;
1692 if (max_age > ULONG_MAX / HZ)
1693 max_age = ULONG_MAX / HZ;
1695 mutex_lock(&dm_bufio_clients_lock);
1696 list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
1697 if (!dm_bufio_trylock(c))
1700 while (!list_empty(&c->lru[LIST_CLEAN])) {
1701 struct dm_buffer *b;
1702 b = list_entry(c->lru[LIST_CLEAN].prev,
1703 struct dm_buffer, lru_list);
1704 if (!__cleanup_old_buffer(b, 0, max_age * HZ))
1706 dm_bufio_cond_resched();
1710 dm_bufio_cond_resched();
1712 mutex_unlock(&dm_bufio_clients_lock);
1715 static struct workqueue_struct *dm_bufio_wq;
1716 static struct delayed_work dm_bufio_work;
1718 static void work_fn(struct work_struct *w)
1720 cleanup_old_buffers();
1722 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1723 DM_BUFIO_WORK_TIMER_SECS * HZ);
1726 /*----------------------------------------------------------------
1728 *--------------------------------------------------------------*/
1731 * This is called only once for the whole dm_bufio module.
1732 * It initializes the memory limit.
1734 static int __init dm_bufio_init(void)
1738 dm_bufio_allocated_kmem_cache = 0;
1739 dm_bufio_allocated_get_free_pages = 0;
1740 dm_bufio_allocated_vmalloc = 0;
1741 dm_bufio_current_allocated = 0;
1743 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1744 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1746 mem = (__u64)((totalram_pages - totalhigh_pages) *
1747 DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1749 if (mem > ULONG_MAX)
1754 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1755 * in fs/proc/internal.h
1757 if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1758 mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
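/*
 * Illustrative: on a 64-bit machine with 4 GiB of RAM this works out to
 * roughly 80 MiB of default cache (2% of RAM), further capped at 25% of
 * the vmalloc area when that is smaller.
 */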
1761 dm_bufio_default_cache_size = mem;
1763 mutex_lock(&dm_bufio_clients_lock);
1764 __cache_size_refresh();
1765 mutex_unlock(&dm_bufio_clients_lock);
1767 dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
1771 INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1772 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1773 DM_BUFIO_WORK_TIMER_SECS * HZ);
1779 * This is called once when unloading the dm_bufio module.
1781 static void __exit dm_bufio_exit(void)
1786 cancel_delayed_work_sync(&dm_bufio_work);
1787 destroy_workqueue(dm_bufio_wq);
1789 for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
1790 struct kmem_cache *kc = dm_bufio_caches[i];
1793 kmem_cache_destroy(kc);
1796 for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1797 kfree(dm_bufio_cache_names[i]);
1799 if (dm_bufio_client_count) {
1800 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1801 __func__, dm_bufio_client_count);
1805 if (dm_bufio_current_allocated) {
1806 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1807 __func__, dm_bufio_current_allocated);
1811 if (dm_bufio_allocated_get_free_pages) {
1812 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1813 __func__, dm_bufio_allocated_get_free_pages);
1817 if (dm_bufio_allocated_vmalloc) {
1818 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1819 __func__, dm_bufio_allocated_vmalloc);
1827 module_init(dm_bufio_init)
1828 module_exit(dm_bufio_exit)
1830 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1831 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1833 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1834 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1836 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1837 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1839 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1840 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1842 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1843 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1845 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1846 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1848 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1849 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
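/*
 * All of the parameters above are also visible at runtime under
 * /sys/module/dm_bufio/parameters/, subject to the permission bits passed
 * to module_param_named().
 */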
1851 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1852 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1853 MODULE_LICENSE("GPL");