/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#define PMEM_MINORS		16

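/*
 * Per-device state: one gendisk backed by a single physically contiguous
 * region of persistent memory, kept mapped into the kernel virtual
 * address space for the lifetime of the device.
 */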
struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	void			*virt_addr;
	size_t			size;
};

static int pmem_major;
static atomic_t pmem_index;

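/*
 * Copy one bio segment between @page and persistent memory.  A read
 * copies pmem -> page and flushes the page's dcache afterwards; a write
 * flushes first and then copies page -> pmem, keeping user mappings of
 * the page coherent on architectures with aliasing data caches.
 */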
static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem = kmap_atomic(page);
	size_t pmem_off = sector << 9;

	if (rw == READ) {
		memcpy(mem + off, pmem->virt_addr + pmem_off, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		memcpy(pmem->virt_addr + pmem_off, mem + off, len);
	}
	kunmap_atomic(mem);
}

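/*
 * pmem is bio-based: every bio is completed synchronously in the
 * submission path, with no request queue or I/O scheduler involved,
 * since copying to and from memory never needs to block.
 */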
static void pmem_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rw, err = 0;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) {
		err = -EIO;
		goto out;
	}
	BUG_ON(bio->bi_rw & REQ_DISCARD);

	rw = bio_data_dir(bio);
	sector = bio->bi_iter.bi_sector;
	bio_for_each_segment(bvec, bio, iter) {
		pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
			     rw, sector);
		sector += bvec.bv_len >> 9;
	}
out:
	bio_endio(bio, err);
}

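/*
 * ->rw_page() lets callers such as the swap code read or write a single
 * page without building a bio; completion is signalled synchronously
 * via page_endio().
 */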
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	page_endio(page, rw & WRITE, 0);
	return 0;
}

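/*
 * ->direct_access() is the hook behind DAX: it hands back a kernel
 * virtual address and a pfn for @sector so callers can access the
 * media directly and bypass the page cache.
 */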
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
			      void **kaddr, unsigned long *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	size_t offset = sector << 9;

	if (!pmem)
		return -ENODEV;

	*kaddr = pmem->virt_addr + offset;
	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;
	return pmem->size - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
};

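/*
 * Allocate and publish one pmem device: reserve and map the physical
 * range, set up the bio-based queue, then register the gendisk.  The
 * labels at the bottom unwind each step in reverse order on failure.
 */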
static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res)
{
	struct pmem_device *pmem;
	struct gendisk *disk;
	int idx, err;

	err = -ENOMEM;
	pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		goto out;

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);

	err = -EINVAL;
	if (!request_mem_region(pmem->phys_addr, pmem->size, "pmem")) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
			 &pmem->phys_addr, pmem->size);
		goto out_free_dev;
	}

	/*
	 * Map the memory as write-through, as we can't write back the
	 * contents of the CPU caches in case of a crash.
	 */
	err = -ENOMEM;
	pmem->virt_addr = ioremap_wt(pmem->phys_addr, pmem->size);
	if (!pmem->virt_addr)
		goto out_release_region;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		goto out_unmap;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_max_hw_sectors(pmem->pmem_queue, 1024);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);

	disk = alloc_disk(PMEM_MINORS);
	if (!disk)
		goto out_free_queue;

	idx = atomic_inc_return(&pmem_index) - 1;

	disk->major		= pmem_major;
	disk->first_minor	= PMEM_MINORS * idx;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "pmem%d", idx);
	disk->driverfs_dev = dev;
	set_capacity(disk, pmem->size >> 9);
	pmem->pmem_disk = disk;

	add_disk(disk);

	return pmem;

out_free_queue:
	blk_cleanup_queue(pmem->pmem_queue);
out_unmap:
	iounmap(pmem->virt_addr);
out_release_region:
	release_mem_region(pmem->phys_addr, pmem->size);
out_free_dev:
	kfree(pmem);
out:
	return ERR_PTR(err);
}

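/* Tear down everything pmem_alloc() set up, in reverse order. */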
static void pmem_free(struct pmem_device *pmem)
{
	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
	iounmap(pmem->virt_addr);
	release_mem_region(pmem->phys_addr, pmem->size);
	kfree(pmem);
}

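/*
 * The platform device carries exactly one IORESOURCE_MEM resource,
 * which gives the physical address and size of the pmem region.
 */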
static int pmem_probe(struct platform_device *pdev)
{
	struct pmem_device *pmem;
	struct resource *res;

	if (WARN_ON(pdev->num_resources > 1))
		return -ENXIO;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	pmem = pmem_alloc(&pdev->dev, res);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	platform_set_drvdata(pdev, pmem);
	return 0;
}

static int pmem_remove(struct platform_device *pdev)
{
	struct pmem_device *pmem = platform_get_drvdata(pdev);

	pmem_free(pmem);
	return 0;
}

static struct platform_driver pmem_driver = {
	.probe		= pmem_probe,
	.remove		= pmem_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "pmem",
	},
};

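/*
 * Binding sketch (not part of this file): the platform bus matches on
 * the "pmem" name, so a region registered elsewhere along these lines
 * would be picked up by pmem_probe().  The address range below is a
 * placeholder, not a real layout:
 *
 *	static struct resource pmem_res = {
 *		.start	= 0x100000000ULL,
 *		.end	= 0x17fffffffULL,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *	platform_device_register_simple("pmem", 0, &pmem_res, 1);
 */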
static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = platform_driver_register(&pmem_driver);
	if (error)
		unregister_blkdev(pmem_major, "pmem");
	return error;
}
module_init(pmem_init);

static void pmem_exit(void)
{
	platform_driver_unregister(&pmem_driver);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");