/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
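/*
 * Worked example (illustrative, assuming 4K pages): PAGE_SECTORS_SHIFT is
 * 12 - 9 = 3, so each page holds PAGE_SECTORS = 8 sectors. Sector 35 then
 * lives in page index 35 >> 3 = 4, at byte offset (35 & 7) << 9 = 1536
 * within that page.
 */
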
/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int			brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

static DEFINE_MUTEX(brd_mutex);

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * use it for I/O.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support XIP and highmem, because our ->direct_access
	 * routine for XIP must return memory that is always addressable.
	 * If XIP was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_XIP
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		/* A concurrent insert won the race: use the existing page. */
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}
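/*
 * Note: this setup step exists so that all page allocation (which may sleep)
 * happens before copy_to_brd(), which runs under kmap_atomic() and therefore
 * must not sleep. A write that straddles a page boundary needs both backing
 * pages pinned in the tree up front, hence the second brd_insert_page() call.
 */
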
static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}
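/*
 * Only whole pages are cleared here; a tail fragment smaller than PAGE_SIZE
 * is left untouched. That matches the discard_granularity of PAGE_SIZE
 * advertised in brd_alloc(), which tells upper layers not to send smaller
 * discard spans.
 */
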
/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}
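/*
 * Reads of sectors that were never written find no page in the radix tree
 * and fall through to the memset() branches above, so an untouched brd
 * device reads back as all-zero without consuming any backing memory.
 */
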
/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}
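/*
 * Note the flush_dcache_page() ordering above: after filling the caller's
 * page on a read, and before consuming it on a write, so kernel and user
 * views of the page stay coherent on architectures with aliasing caches.
 */
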
static void brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;
	int err = -EIO;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto out;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		err = 0;
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		err = brd_do_bvec(brd, bvec.bv_page, len,
					bvec.bv_offset, rw, sector);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio, err);
}

#ifdef CONFIG_BLK_DEV_XIP
static int brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, unsigned long *pfn)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	if (sector & (PAGE_SECTORS-1))
		return -EINVAL;
	if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk))
		return -ERANGE;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn(page);

	return 0;
}
#endif
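/*
 * Note that ->direct_access eagerly allocates the backing page via
 * brd_insert_page(): XIP callers map and touch the memory directly,
 * so there is no later request path on which to allocate it.
 */
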
static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics, we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Kill the cache first, so it isn't written back to the
		 * device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		kill_bdev(bdev);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.ioctl =		brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
	.direct_access =	brd_direct_access,
#endif
};

/*
 * And now the modules code and kernel interface.
 */
static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part;
static int part_shift;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
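/*
 * Example usage (illustrative):
 *
 *	modprobe brd rd_nr=4 rd_size=16384
 *
 * creates /dev/ram0 through /dev/ram3 up front, each 16384 KB (16 MB),
 * with 4 as the hard limit on the number of devices (see brd_init below).
 */
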
#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;
	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
	sprintf(disk->disk_name, "ram%d", i);
	/* rd_size is in kbytes; gendisk capacity is in 512-byte sectors */
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i)
{
	struct brd_device *brd;

	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) >> part_shift);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	*part = 0;
	return kobj;
}
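/*
 * brd_probe() backs the on-demand instantiation described in brd_init():
 * opening a /dev/ramN node whose minor has no disk yet reaches here via
 * blk_register_region(), and brd_init_one() creates the device.
 */
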
static int __init brd_init(void)
{
	int i, nr;
	unsigned long range;
	struct brd_device *brd, *next;

	/*
	 * The brd module now has a feature to instantiate the underlying
	 * device structure on-demand, provided that there is an accessible
	 * dev node. However, this will not work well with user space tools
	 * that don't know about such a "feature". In order not to break any
	 * existing tools, we do the following:
	 *
	 * (1) if rd_nr is specified, create that many upfront, and this
	 *     also becomes a hard limit.
	 * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
	 *     (default 16) rd devices on module load; users can further
	 *     extend brd devices by creating dev nodes themselves and having
	 *     the kernel automatically instantiate the actual device
	 *     on-demand.
	 */

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can decide the correct minor
		 * number if they want to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
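	/*
	 * For example (illustrative): max_part=10 gives part_shift =
	 * fls(10) = 4, and max_part is rounded up to 15, so each disk
	 * spans 16 minors (the whole disk plus up to 15 partitions).
	 */
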
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (rd_nr > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (rd_nr) {
		nr = rd_nr;
		range = rd_nr << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_RAM_COUNT;
		range = 1UL << MINORBITS;
	}

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	for (i = 0; i < nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range,
				  THIS_MODULE, brd_probe, NULL, NULL);

	printk(KERN_INFO "brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	unsigned long range;
	struct brd_device *brd, *next;

	range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
}

module_init(brd_init);
module_exit(brd_exit);