/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"
debug_info_t *scm_debug;
static int scm_major;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
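/* Free the AOB and AIDAW pages as well as the cluster resources of a request. */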
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	free_page((unsigned long) scmrq->aidaw);
	__scm_free_rq_cluster(scmrq);
	kfree(aobrq);
}
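/* Free all requests on the inactive list (module unload path). */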
static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);
}
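/*
 * Allocate one request, consisting of the AOB request header with the
 * scm_request appended, plus a zeroed page each for the AOB and the
 * AIDAW list (GFP_DMA, i.e. memory the device can address directly).
 */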
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob || !scmrq->aidaw) {
		__scm_free_rq(scmrq);
		return -ENOMEM;
	}
	if (__scm_alloc_rq_cluster(scmrq)) {
		__scm_free_rq(scmrq);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);
	return 0;
}
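/* Preallocate nrqs requests; stop at the first allocation failure. */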
static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();
	return ret;
}
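/* Take a request from the inactive list, or return NULL if none is left. */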
static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}
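/* Return a request to the inactive list once processing has finished. */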
static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}
static int scm_open(struct block_device *blkdev, fmode_t mode)
{
	return scm_get_ref();
}

static int scm_release(struct gendisk *gendisk, fmode_t mode)
{
	scm_put_ref();
	return 0;
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
	.open = scm_open,
	.release = scm_release,
};
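/*
 * Fill the msb of the AOB with the parameters of the block layer
 * request: device start address, operation code, block count and one
 * AIDAW entry per segment pointing at the payload page.
 */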
static void scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct aidaw *aidaw = scmrq->aidaw;
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct bio_vec *bv;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count = 1;
	msb->scm_addr = scmdev->address +
		((u64) blk_rq_pos(scmrq->request) << 9);
	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
		MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, scmrq->request, iter) {
		WARN_ON(bv->bv_offset);
		msb->blk_count += bv->bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv->bv_page);
		aidaw++;
	}
}
static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq,
				    struct request *req)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(aob, 0, sizeof(*aob));
	memset(scmrq->aidaw, 0, PAGE_SIZE);
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->request = req;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	scm_request_cluster_init(scmrq);
}
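/*
 * Make sure the request queue gets run again: either the completion
 * of an outstanding request will trigger the restart, or we schedule
 * a delayed queue run ourselves.
 */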
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}
void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	scm_release_cluster(scmrq);
	blk_requeue_request(bdev->rq, scmrq->request);
	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}
void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	scm_release_cluster(scmrq);
	blk_end_request_all(scmrq->request, scmrq->error);
	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}
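/*
 * Request function of the block queue: build an AOB per fs request
 * and start it, backing off when no scm_request is available or the
 * subchannel is busy.
 */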
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq;
	struct request *req;
	int ret;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS)
			continue;

		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			scm_ensure_queue_restart(bdev);
			return;
		}
		scm_request_init(bdev, scmrq, req);
		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_done(scmrq);
			return;
		}
		if (scm_need_cluster_request(scmrq)) {
			atomic_inc(&bdev->queued_reqs);
			blk_start_request(req);
			scm_initiate_cluster_request(scmrq);
			return;
		}
		scm_request_prepare(scmrq);
		atomic_inc(&bdev->queued_reqs);
		blk_start_request(req);

		ret = scm_start_aob(scmrq->aob);
		if (ret) {
			SCM_LOG(5, "no subchannel");
			scm_request_requeue(scmrq);
			return;
		}
	}
}
static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}
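/*
 * Completion callback for an AOB: record the error state and defer
 * all further processing to the tasklet.
 */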
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}
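/*
 * Tasklet: finish completed requests, retry failed ones (up to their
 * retry budget) and kick the request queue for more work.
 */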
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			if (scm_start_aob(scmrq->aob)) {
				spin_lock_irqsave(&bdev->rq_lock, flags);
				scm_request_requeue(scmrq);
				spin_unlock_irqrestore(&bdev->rq_lock, flags);
			}
			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}
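/*
 * Set up the block device side of an scm device: request queue with
 * 4K logical blocks and at most nr_max_blk segments per request, plus
 * the gendisk under which the device is registered.
 */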
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}
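/*
 * Module initialization: register the major number, preallocate the
 * request pool, set up the s390 debug feature and register the driver.
 */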
static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_cluster_size_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	if (scm_alloc_rqs(nr_requests))
		goto out_unreg;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug)
		goto out_free;

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
out_unreg:
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);
static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);