drivers/lightnvm/rrpc.c
1 /*
2  * Copyright (C) 2015 IT University of Copenhagen
3  * Initial release: Matias Bjorling <m@bjorling.me>
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version
7  * 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
15  */
16
17 #include "rrpc.h"
18
19 static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
20 static DECLARE_RWSEM(rrpc_lock);
21
22 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
23                                 struct nvm_rq *rqd, unsigned long flags);
24
25 #define rrpc_for_each_lun(rrpc, rlun, i) \
26                 for ((i) = 0, rlun = &(rrpc)->luns[0]; \
27                         (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
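
/*
 * Typical use of the iterator above (sketch only, mirroring how
 * rrpc_get_lun_rr() walks the LUNs):
 *
 *	struct rrpc_lun *rlun;
 *	unsigned int i;
 *
 *	rrpc_for_each_lun(rrpc, rlun, i)
 *		pr_debug("lun %u: %u free blocks\n", i,
 *					rlun->parent->nr_free_blocks);
 */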
28
29 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
30 {
31         struct rrpc_block *rblk = a->rblk;
32         unsigned int pg_offset;
33
34         lockdep_assert_held(&rrpc->rev_lock);
35
36         if (a->addr == ADDR_EMPTY || !rblk)
37                 return;
38
39         spin_lock(&rblk->lock);
40
41         div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
42         WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
43         rblk->nr_invalid_pages++;
44
45         spin_unlock(&rblk->lock);
46
47         rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
48 }
49
50 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
51                                                                 unsigned len)
52 {
53         sector_t i;
54
55         spin_lock(&rrpc->rev_lock);
56         for (i = slba; i < slba + len; i++) {
57                 struct rrpc_addr *gp = &rrpc->trans_map[i];
58
59                 rrpc_page_invalidate(rrpc, gp);
60                 gp->rblk = NULL;
61         }
62         spin_unlock(&rrpc->rev_lock);
63 }
64
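/*
 * Allocate a request and take the inflight lock on [laddr, laddr + pages).
 * Returns ERR_PTR(-ENOMEM) if no request could be allocated, NULL if the
 * range is already locked (callers may retry), or the locked request.
 */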
65 static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
66                                         sector_t laddr, unsigned int pages)
67 {
68         struct nvm_rq *rqd;
69         struct rrpc_inflight_rq *inf;
70
71         rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
72         if (!rqd)
73                 return ERR_PTR(-ENOMEM);
74
75         inf = rrpc_get_inflight_rq(rqd);
76         if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
77                 mempool_free(rqd, rrpc->rq_pool);
78                 return NULL;
79         }
80
81         return rqd;
82 }
83
84 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
85 {
86         struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
87
88         rrpc_unlock_laddr(rrpc, inf);
89
90         mempool_free(rqd, rrpc->rq_pool);
91 }
92
93 static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
94 {
95         sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
96         sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
97         struct nvm_rq *rqd;
98
99         do {
100                 rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
101                 schedule();
102         } while (!rqd);
103
104         if (IS_ERR(rqd)) {
105                 pr_err("rrpc: unable to acquire inflight IO\n");
106                 bio_io_error(bio);
107                 return;
108         }
109
110         rrpc_invalidate_range(rrpc, slba, len);
111         rrpc_inflight_laddr_release(rrpc, rqd);
112 }
113
114 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
115 {
116         return (rblk->next_page == rrpc->dev->pgs_per_blk);
117 }
118
119 static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
120 {
121         struct nvm_block *blk = rblk->parent;
122
123         return blk->id * rrpc->dev->pgs_per_blk;
124 }
125
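/*
 * Decompose an rrpc linear page address into the device's generic ppa_addr
 * fields (sector, page, block, LUN, channel) by repeated division and
 * remainder against the device geometry, least-significant field first.
 */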
126 static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
127                                                         struct ppa_addr r)
128 {
129         struct ppa_addr l;
130         int secs, pgs, blks, luns;
131         sector_t ppa = r.ppa;
132
133         l.ppa = 0;
134
135         div_u64_rem(ppa, dev->sec_per_pg, &secs);
136         l.g.sec = secs;
137
138         sector_div(ppa, dev->sec_per_pg);
139         div_u64_rem(ppa, dev->sec_per_blk, &pgs);
140         l.g.pg = pgs;
141
142         sector_div(ppa, dev->pgs_per_blk);
143         div_u64_rem(ppa, dev->blks_per_lun, &blks);
144         l.g.blk = blks;
145
146         sector_div(ppa, dev->blks_per_lun);
147         div_u64_rem(ppa, dev->luns_per_chnl, &luns);
148         l.g.lun = luns;
149
150         sector_div(ppa, dev->luns_per_chnl);
151         l.g.ch = ppa;
152
153         return l;
154 }
155
156 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
157 {
158         struct ppa_addr paddr;
159
160         paddr.ppa = addr;
161         return linear_to_generic_addr(dev, paddr);
162 }
163
164 /* requires rlun->lock to be held */
165 static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
166 {
167         struct rrpc *rrpc = rlun->rrpc;
168
169         BUG_ON(!rblk);
170
171         if (rlun->cur) {
172                 spin_lock(&rlun->cur->lock);
173                 WARN_ON(!block_is_full(rrpc, rlun->cur));
174                 spin_unlock(&rlun->cur->lock);
175         }
176         rlun->cur = rblk;
177 }
178
179 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
180                                                         unsigned long flags)
181 {
182         struct nvm_block *blk;
183         struct rrpc_block *rblk;
184
185         blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
186         if (!blk)
187                 return NULL;
188
189         rblk = &rlun->blocks[blk->id];
190         blk->priv = rblk;
191
192         bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
193         rblk->next_page = 0;
194         rblk->nr_invalid_pages = 0;
195         atomic_set(&rblk->data_cmnt_size, 0);
196
197         return rblk;
198 }
199
200 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
201 {
202         nvm_put_blk(rrpc->dev, rblk->parent);
203 }
204
205 static void rrpc_put_blks(struct rrpc *rrpc)
206 {
207         struct rrpc_lun *rlun;
208         int i;
209
210         for (i = 0; i < rrpc->nr_luns; i++) {
211                 rlun = &rrpc->luns[i];
212                 if (rlun->cur)
213                         rrpc_put_blk(rrpc, rlun->cur);
214                 if (rlun->gc_cur)
215                         rrpc_put_blk(rrpc, rlun->gc_cur);
216         }
217 }
218
219 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
220 {
221         int next = atomic_inc_return(&rrpc->next_lun);
222
223         return &rrpc->luns[next % rrpc->nr_luns];
224 }
225
226 static void rrpc_gc_kick(struct rrpc *rrpc)
227 {
228         struct rrpc_lun *rlun;
229         unsigned int i;
230
231         for (i = 0; i < rrpc->nr_luns; i++) {
232                 rlun = &rrpc->luns[i];
233                 queue_work(rrpc->krqd_wq, &rlun->ws_gc);
234         }
235 }
236
237 /*
238  * Timer-driven GC: kick the per-LUN GC workers and re-arm the timer.
239  */
240 static void rrpc_gc_timer(unsigned long data)
241 {
242         struct rrpc *rrpc = (struct rrpc *)data;
243
244         rrpc_gc_kick(rrpc);
245         mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
246 }
247
248 static void rrpc_end_sync_bio(struct bio *bio)
249 {
250         struct completion *waiting = bio->bi_private;
251
252         if (bio->bi_error)
253                 pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
254
255         complete(waiting);
256 }
257
258 /*
259  * rrpc_move_valid_pages - migrate live data off the block
260  * @rrpc: the 'rrpc' structure
261  * @rblk: the block from which to migrate live pages
262  *
263  * Description:
264  *   GC algorithms may call this function to migrate remaining live
265  *   pages off the block prior to erasing it. This function blocks
266  *   further execution until the operation is complete.
267  */
268 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
269 {
270         struct request_queue *q = rrpc->dev->q;
271         struct rrpc_rev_addr *rev;
272         struct nvm_rq *rqd;
273         struct bio *bio;
274         struct page *page;
275         int slot;
276         int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
277         u64 phys_addr;
278         DECLARE_COMPLETION_ONSTACK(wait);
279
280         if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
281                 return 0;
282
283         bio = bio_alloc(GFP_NOIO, 1);
284         if (!bio) {
285                 pr_err("nvm: could not alloc bio to gc\n");
286                 return -ENOMEM;
287         }
288
289         page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
290         if (!page)
291                 return -ENOMEM;
292
293         while ((slot = find_first_zero_bit(rblk->invalid_pages,
294                                             nr_pgs_per_blk)) < nr_pgs_per_blk) {
295
296                 /* Lock laddr */
297                 phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
298
299 try:
300                 spin_lock(&rrpc->rev_lock);
301                 /* Get logical address from physical to logical table */
302                 rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
303                 /* already updated by previous regular write */
304                 if (rev->addr == ADDR_EMPTY) {
305                         spin_unlock(&rrpc->rev_lock);
306                         continue;
307                 }
308
309                 rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
310                 if (IS_ERR_OR_NULL(rqd)) {
311                         spin_unlock(&rrpc->rev_lock);
312                         schedule();
313                         goto try;
314                 }
315
316                 spin_unlock(&rrpc->rev_lock);
317
318                 /* Perform read to do GC */
319                 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
320                 bio->bi_rw = READ;
321                 bio->bi_private = &wait;
322                 bio->bi_end_io = rrpc_end_sync_bio;
323
324                 /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
325                 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
326
327                 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
328                         pr_err("rrpc: gc read failed.\n");
329                         rrpc_inflight_laddr_release(rrpc, rqd);
330                         goto finished;
331                 }
332                 wait_for_completion_io(&wait);
333
334                 bio_reset(bio);
335                 reinit_completion(&wait);
336
337                 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
338                 bio->bi_rw = WRITE;
339                 bio->bi_private = &wait;
340                 bio->bi_end_io = rrpc_end_sync_bio;
341
342                 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
343
344                 /* turn the command around and write the data back to a new
345                  * address
346                  */
347                 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
348                         pr_err("rrpc: gc write failed.\n");
349                         rrpc_inflight_laddr_release(rrpc, rqd);
350                         goto finished;
351                 }
352                 wait_for_completion_io(&wait);
353
354                 rrpc_inflight_laddr_release(rrpc, rqd);
355
356                 bio_reset(bio);
357         }
358
359 finished:
360         mempool_free(page, rrpc->page_pool);
361         bio_put(bio);
362
363         if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
364                 pr_err("nvm: failed to garbage collect block\n");
365                 return -EIO;
366         }
367
368         return 0;
369 }
370
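/*
 * Per-block GC worker: migrate any remaining valid pages off the block,
 * erase it and return it to the media manager's free list.
 */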
371 static void rrpc_block_gc(struct work_struct *work)
372 {
373         struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
374                                                                         ws_gc);
375         struct rrpc *rrpc = gcb->rrpc;
376         struct rrpc_block *rblk = gcb->rblk;
377         struct nvm_dev *dev = rrpc->dev;
378
379         pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
380
381         if (rrpc_move_valid_pages(rrpc, rblk))
382                 goto done;
383
384         nvm_erase_blk(dev, rblk->parent);
385         rrpc_put_blk(rrpc, rblk);
386 done:
387         mempool_free(gcb, rrpc->gcb_pool);
388 }
389
390 /* The block with the highest number of invalid pages will be at the
391  * beginning of the list.
392  */
393 static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
394                                                         struct rrpc_block *rb)
395 {
396         if (ra->nr_invalid_pages == rb->nr_invalid_pages)
397                 return ra;
398
399         return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
400 }
401
402 /* Linearly find the block with the highest number of invalid pages.
403  * Requires rlun->lock to be held.
404  */
405 static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
406 {
407         struct list_head *prio_list = &rlun->prio_list;
408         struct rrpc_block *rblock, *max;
409
410         BUG_ON(list_empty(prio_list));
411
412         max = list_first_entry(prio_list, struct rrpc_block, prio);
413         list_for_each_entry(rblock, prio_list, prio)
414                 max = rblock_max_invalid(max, rblock);
415
416         return max;
417 }
418
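/*
 * Per-LUN GC worker. While the LUN has fewer free blocks than the target
 * (blks_per_lun / GC_LIMIT_INVERSE, but at least nr_luns), take the block
 * with the most invalid pages off the priority list and queue it for
 * per-block GC.
 */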
419 static void rrpc_lun_gc(struct work_struct *work)
420 {
421         struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
422         struct rrpc *rrpc = rlun->rrpc;
423         struct nvm_lun *lun = rlun->parent;
424         struct rrpc_block_gc *gcb;
425         unsigned int nr_blocks_need;
426
427         nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
428
429         if (nr_blocks_need < rrpc->nr_luns)
430                 nr_blocks_need = rrpc->nr_luns;
431
432         spin_lock(&rlun->lock);
433         while (nr_blocks_need > lun->nr_free_blocks &&
434                                         !list_empty(&rlun->prio_list)) {
435                 struct rrpc_block *rblock = block_prio_find_max(rlun);
436                 struct nvm_block *block = rblock->parent;
437
438                 if (!rblock->nr_invalid_pages)
439                         break;
440
441                 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
442                 if (!gcb)
443                         break;
444
445                 list_del_init(&rblock->prio);
446
447                 BUG_ON(!block_is_full(rrpc, rblock));
448
449                 pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
450
451                 gcb->rrpc = rrpc;
452                 gcb->rblk = rblock;
453                 INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
454
455                 queue_work(rrpc->kgc_wq, &gcb->ws_gc);
456
457                 nr_blocks_need--;
458         }
459         spin_unlock(&rlun->lock);
460
461         /* TODO: Hint that request queue can be started again */
462 }
463
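/*
 * Deferred work run once a block has been fully written: put it on its
 * LUN's priority list so the GC workers can consider it for reclaim.
 */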
464 static void rrpc_gc_queue(struct work_struct *work)
465 {
466         struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
467                                                                         ws_gc);
468         struct rrpc *rrpc = gcb->rrpc;
469         struct rrpc_block *rblk = gcb->rblk;
470         struct nvm_lun *lun = rblk->parent->lun;
471         struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
472
473         spin_lock(&rlun->lock);
474         list_add_tail(&rblk->prio, &rlun->prio_list);
475         spin_unlock(&rlun->lock);
476
477         mempool_free(gcb, rrpc->gcb_pool);
478         pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
479                                                         rblk->parent->id);
480 }
481
482 static const struct block_device_operations rrpc_fops = {
483         .owner          = THIS_MODULE,
484 };
485
486 static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
487 {
488         unsigned int i;
489         struct rrpc_lun *rlun, *max_free;
490
491         if (!is_gc)
492                 return get_next_lun(rrpc);
493
494         /* During GC we don't care about the round-robin order; instead we
495          * want to maintain evenness between the LUNs.
496          */
497         max_free = &rrpc->luns[0];
498         /* Prevent the GC-ing LUN from devouring pages of a LUN with
499          * few free blocks. We don't take the lock as we only need an
500          * estimate.
501          */
502         rrpc_for_each_lun(rrpc, rlun, i) {
503                 if (rlun->parent->nr_free_blocks >
504                                         max_free->parent->nr_free_blocks)
505                         max_free = rlun;
506         }
507
508         return max_free;
509 }
510
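/*
 * Install the laddr -> (rblk, paddr) entry in the forward map and the
 * paddr -> laddr entry in the reverse map, invalidating whatever page the
 * logical address pointed to before. Both tables are updated under
 * rev_lock.
 */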
511 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
512                                         struct rrpc_block *rblk, u64 paddr)
513 {
514         struct rrpc_addr *gp;
515         struct rrpc_rev_addr *rev;
516
517         BUG_ON(laddr >= rrpc->nr_pages);
518
519         gp = &rrpc->trans_map[laddr];
520         spin_lock(&rrpc->rev_lock);
521         if (gp->rblk)
522                 rrpc_page_invalidate(rrpc, gp);
523
524         gp->addr = paddr;
525         gp->rblk = rblk;
526
527         rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
528         rev->addr = laddr;
529         spin_unlock(&rrpc->rev_lock);
530
531         return gp;
532 }
533
534 static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
535 {
536         u64 addr = ADDR_EMPTY;
537
538         spin_lock(&rblk->lock);
539         if (block_is_full(rrpc, rblk))
540                 goto out;
541
542         addr = block_to_addr(rrpc, rblk) + rblk->next_page;
543
544         rblk->next_page++;
545 out:
546         spin_unlock(&rblk->lock);
547         return addr;
548 }
549
550 /* Simple round-robin logical-to-physical address translation.
551  *
552  * Retrieve the mapping using the active append point, then advance the
553  * append point for the next write to the device.
554  *
555  * Returns the rrpc_addr with the physical address and block. Remember to
556  * release the mapping when the request is finished.
557  */
558 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
559                                                                 int is_gc)
560 {
561         struct rrpc_lun *rlun;
562         struct rrpc_block *rblk;
563         struct nvm_lun *lun;
564         u64 paddr;
565
566         rlun = rrpc_get_lun_rr(rrpc, is_gc);
567         lun = rlun->parent;
568
569         if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
570                 return NULL;
571
572         spin_lock(&rlun->lock);
573
574         rblk = rlun->cur;
575 retry:
576         paddr = rrpc_alloc_addr(rrpc, rblk);
577
578         if (paddr == ADDR_EMPTY) {
579                 rblk = rrpc_get_blk(rrpc, rlun, 0);
580                 if (rblk) {
581                         rrpc_set_lun_cur(rlun, rblk);
582                         goto retry;
583                 }
584
585                 if (is_gc) {
586                         /* retry from emergency gc block */
587                         paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
588                         if (paddr == ADDR_EMPTY) {
589                                 rblk = rrpc_get_blk(rrpc, rlun, 1);
590                                 if (!rblk) {
591                                         pr_err("rrpc: no more blocks\n");
592                                         goto err;
593                                 }
594
595                                 rlun->gc_cur = rblk;
596                                 paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
597                         }
598                         rblk = rlun->gc_cur;
599                 }
600         }
601
602         spin_unlock(&rlun->lock);
603         return rrpc_update_map(rrpc, laddr, rblk, paddr);
604 err:
605         spin_unlock(&rlun->lock);
606         return NULL;
607 }
608
609 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
610 {
611         struct rrpc_block_gc *gcb;
612
613         gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
614         if (!gcb) {
615                 pr_err("rrpc: unable to queue block for gc.");
616                 return;
617         }
618
619         gcb->rrpc = rrpc;
620         gcb->rblk = rblk;
621
622         INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
623         queue_work(rrpc->kgc_wq, &gcb->ws_gc);
624 }
625
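/*
 * Write completion: account the pages committed to each block and, once a
 * block has been completely written, hand it to rrpc_run_gc() so it is
 * queued on its LUN's GC priority list.
 */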
626 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
627                                                 sector_t laddr, uint8_t npages)
628 {
629         struct rrpc_addr *p;
630         struct rrpc_block *rblk;
631         struct nvm_lun *lun;
632         int cmnt_size, i;
633
634         for (i = 0; i < npages; i++) {
635                 p = &rrpc->trans_map[laddr + i];
636                 rblk = p->rblk;
637                 lun = rblk->parent->lun;
638
639                 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
640                 if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
641                         rrpc_run_gc(rrpc, rblk);
642         }
643 }
644
645 static int rrpc_end_io(struct nvm_rq *rqd, int error)
646 {
647         struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
648         struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
649         uint8_t npages = rqd->nr_pages;
650         sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
651
652         if (bio_data_dir(rqd->bio) == WRITE)
653                 rrpc_end_io_write(rrpc, rrqd, laddr, npages);
654
655         bio_put(rqd->bio);
656
657         if (rrqd->flags & NVM_IOTYPE_GC)
658                 return 0;
659
660         rrpc_unlock_rq(rrpc, rqd);
661
662         if (npages > 1)
663                 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
664         if (rqd->metadata)
665                 nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
666
667         mempool_free(rqd, rrpc->rq_pool);
668
669         return 0;
670 }
671
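/*
 * Set up a multi-page read: lock the logical range (unless called from GC,
 * which already holds it) and translate each logical page to a physical
 * address through the forward map. An unmapped page ends the request with
 * NVM_IO_DONE.
 */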
672 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
673                         struct nvm_rq *rqd, unsigned long flags, int npages)
674 {
675         struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
676         struct rrpc_addr *gp;
677         sector_t laddr = rrpc_get_laddr(bio);
678         int is_gc = flags & NVM_IOTYPE_GC;
679         int i;
680
681         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
682                 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
683                 return NVM_IO_REQUEUE;
684         }
685
686         for (i = 0; i < npages; i++) {
687                 /* We assume that mapping occurs at 4KB granularity */
688                 BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
689                 gp = &rrpc->trans_map[laddr + i];
690
691                 if (gp->rblk) {
692                         rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
693                                                                 gp->addr);
694                 } else {
695                         BUG_ON(is_gc);
696                         rrpc_unlock_laddr(rrpc, r);
697                         nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
698                                                         rqd->dma_ppa_list);
699                         return NVM_IO_DONE;
700                 }
701         }
702
703         rqd->opcode = NVM_OP_HBREAD;
704
705         return NVM_IO_OK;
706 }
707
708 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
709                                                         unsigned long flags)
710 {
711         struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
712         int is_gc = flags & NVM_IOTYPE_GC;
713         sector_t laddr = rrpc_get_laddr(bio);
714         struct rrpc_addr *gp;
715
716         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
717                 return NVM_IO_REQUEUE;
718
719         BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
720         gp = &rrpc->trans_map[laddr];
721
722         if (gp->rblk) {
723                 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
724         } else {
725                 BUG_ON(is_gc);
726                 rrpc_unlock_rq(rrpc, rqd);
727                 return NVM_IO_DONE;
728         }
729
730         rqd->opcode = NVM_OP_HBREAD;
731         rrqd->addr = gp;
732
733         return NVM_IO_OK;
734 }
735
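/*
 * Set up a multi-page write: lock the logical range and allocate a new
 * physical page for every logical page via rrpc_map_page(). If no page can
 * be mapped, the request is requeued and GC is kicked.
 */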
736 static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
737                         struct nvm_rq *rqd, unsigned long flags, int npages)
738 {
739         struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
740         struct rrpc_addr *p;
741         sector_t laddr = rrpc_get_laddr(bio);
742         int is_gc = flags & NVM_IOTYPE_GC;
743         int i;
744
745         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
746                 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
747                 return NVM_IO_REQUEUE;
748         }
749
750         for (i = 0; i < npages; i++) {
751                 /* We assume that mapping occurs at 4KB granularity */
752                 p = rrpc_map_page(rrpc, laddr + i, is_gc);
753                 if (!p) {
754                         BUG_ON(is_gc);
755                         rrpc_unlock_laddr(rrpc, r);
756                         nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
757                                                         rqd->dma_ppa_list);
758                         rrpc_gc_kick(rrpc);
759                         return NVM_IO_REQUEUE;
760                 }
761
762                 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
763                                                                 p->addr);
764         }
765
766         rqd->opcode = NVM_OP_HBWRITE;
767
768         return NVM_IO_OK;
769 }
770
771 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
772                                 struct nvm_rq *rqd, unsigned long flags)
773 {
774         struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
775         struct rrpc_addr *p;
776         int is_gc = flags & NVM_IOTYPE_GC;
777         sector_t laddr = rrpc_get_laddr(bio);
778
779         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
780                 return NVM_IO_REQUEUE;
781
782         p = rrpc_map_page(rrpc, laddr, is_gc);
783         if (!p) {
784                 BUG_ON(is_gc);
785                 rrpc_unlock_rq(rrpc, rqd);
786                 rrpc_gc_kick(rrpc);
787                 return NVM_IO_REQUEUE;
788         }
789
790         rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
791         rqd->opcode = NVM_OP_HBWRITE;
792         rrqd->addr = p;
793
794         return NVM_IO_OK;
795 }
796
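/*
 * Common request setup: allocate a PPA list for multi-page requests and
 * dispatch to the single- or multi-page read/write setup helpers.
 */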
797 static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
798                         struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
799 {
800         if (npages > 1) {
801                 rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
802                                                         &rqd->dma_ppa_list);
803                 if (!rqd->ppa_list) {
804                         pr_err("rrpc: not able to allocate ppa list\n");
805                         return NVM_IO_ERR;
806                 }
807
808                 if (bio_rw(bio) == WRITE)
809                         return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
810                                                                         npages);
811
812                 return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
813         }
814
815         if (bio_rw(bio) == WRITE)
816                 return rrpc_write_rq(rrpc, bio, rqd, flags);
817
818         return rrpc_read_rq(rrpc, bio, rqd, flags);
819 }
820
821 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
822                                 struct nvm_rq *rqd, unsigned long flags)
823 {
824         int err;
825         struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
826         uint8_t nr_pages = rrpc_get_pages(bio);
827         int bio_size = bio_sectors(bio) << 9;
828
829         if (bio_size < rrpc->dev->sec_size)
830                 return NVM_IO_ERR;
831         else if (bio_size > rrpc->dev->max_rq_size)
832                 return NVM_IO_ERR;
833
834         err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
835         if (err)
836                 return err;
837
838         bio_get(bio);
839         rqd->bio = bio;
840         rqd->ins = &rrpc->instance;
841         rqd->nr_pages = nr_pages;
842         rrq->flags = flags;
843
844         err = nvm_submit_io(rrpc->dev, rqd);
845         if (err) {
846                 pr_err("rrpc: I/O submission failed: %d\n", err);
847                 bio_put(bio);
848                 if (!(flags & NVM_IOTYPE_GC)) {
849                         rrpc_unlock_rq(rrpc, rqd);
850                         if (rqd->nr_pages > 1)
851                                 nvm_dev_dma_free(rrpc->dev,
852                                                 rqd->ppa_list, rqd->dma_ppa_list);
853                 }
854                 return NVM_IO_ERR;
855         }
856
857         return NVM_IO_OK;
858 }
859
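/*
 * Block layer entry point. Discards are handled inline; everything else is
 * wrapped in an nvm_rq and pushed through rrpc_submit_io(). Requests that
 * cannot make progress are parked on requeue_bios and resubmitted by the
 * rrpc_requeue() worker.
 */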
860 static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
861 {
862         struct rrpc *rrpc = q->queuedata;
863         struct nvm_rq *rqd;
864         int err;
865
866         if (bio->bi_rw & REQ_DISCARD) {
867                 rrpc_discard(rrpc, bio);
868                 return BLK_QC_T_NONE;
869         }
870
871         rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
872         if (!rqd) {
873                 pr_err_ratelimited("rrpc: not able to queue bio.\n");
874                 bio_io_error(bio);
875                 return BLK_QC_T_NONE;
876         }
877         memset(rqd, 0, sizeof(struct nvm_rq));
878
879         err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
880         switch (err) {
881         case NVM_IO_OK:
882                 return BLK_QC_T_NONE;
883         case NVM_IO_ERR:
884                 bio_io_error(bio);
885                 break;
886         case NVM_IO_DONE:
887                 bio_endio(bio);
888                 break;
889         case NVM_IO_REQUEUE:
890                 spin_lock(&rrpc->bio_lock);
891                 bio_list_add(&rrpc->requeue_bios, bio);
892                 spin_unlock(&rrpc->bio_lock);
893                 queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
894                 break;
895         }
896
897         mempool_free(rqd, rrpc->rq_pool);
898         return BLK_QC_T_NONE;
899 }
900
901 static void rrpc_requeue(struct work_struct *work)
902 {
903         struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
904         struct bio_list bios;
905         struct bio *bio;
906
907         bio_list_init(&bios);
908
909         spin_lock(&rrpc->bio_lock);
910         bio_list_merge(&bios, &rrpc->requeue_bios);
911         bio_list_init(&rrpc->requeue_bios);
912         spin_unlock(&rrpc->bio_lock);
913
914         while ((bio = bio_list_pop(&bios)))
915                 rrpc_make_rq(rrpc->disk->queue, bio);
916 }
917
918 static void rrpc_gc_free(struct rrpc *rrpc)
919 {
920         struct rrpc_lun *rlun;
921         int i;
922
923         if (rrpc->krqd_wq)
924                 destroy_workqueue(rrpc->krqd_wq);
925
926         if (rrpc->kgc_wq)
927                 destroy_workqueue(rrpc->kgc_wq);
928
929         if (!rrpc->luns)
930                 return;
931
932         for (i = 0; i < rrpc->nr_luns; i++) {
933                 rlun = &rrpc->luns[i];
934
935                 if (!rlun->blocks)
936                         break;
937                 vfree(rlun->blocks);
938         }
939 }
940
941 static int rrpc_gc_init(struct rrpc *rrpc)
942 {
943         rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
944                                                                 rrpc->nr_luns);
945         if (!rrpc->krqd_wq)
946                 return -ENOMEM;
947
948         rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
949         if (!rrpc->kgc_wq)
950                 return -ENOMEM;
951
952         setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
953
954         return 0;
955 }
956
957 static void rrpc_map_free(struct rrpc *rrpc)
958 {
959         vfree(rrpc->rev_trans_map);
960         vfree(rrpc->trans_map);
961 }
962
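/*
 * Callback used while bringing up the device's L2P table: entries[] holds
 * one physical page address per logical page starting at slba. Address
 * zero is skipped (reserved) and every other in-range entry is recorded in
 * both the forward and reverse maps.
 */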
963 static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
964 {
965         struct rrpc *rrpc = (struct rrpc *)private;
966         struct nvm_dev *dev = rrpc->dev;
967         struct rrpc_addr *addr = rrpc->trans_map + slba;
968         struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
969         sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
970         u64 elba = slba + nlb;
971         u64 i;
972
973         if (unlikely(elba > dev->total_pages)) {
974                 pr_err("nvm: L2P data from device is out of bounds!\n");
975                 return -EINVAL;
976         }
977
978         for (i = 0; i < nlb; i++) {
979                 u64 pba = le64_to_cpu(entries[i]);
980                 /* LNVM treats address-spaces as silos, LBA and PBA are
981                  * equally large and zero-indexed.
982                  */
983                 if (unlikely(pba >= max_pages && pba != U64_MAX)) {
984                         pr_err("nvm: L2P data entry is out of bounds!\n");
985                         return -EINVAL;
986                 }
987
988                 /* Address zero is special: the first page on a device is
989                  * protected, as it often holds internal device boot
990                  * information.
991                  */
992                 if (!pba)
993                         continue;
994
995                 addr[i].addr = pba;
996                 raddr[pba].addr = slba + i;
997         }
998
999         return 0;
1000 }
1001
1002 static int rrpc_map_init(struct rrpc *rrpc)
1003 {
1004         struct nvm_dev *dev = rrpc->dev;
1005         sector_t i;
1006         int ret;
1007
1008         rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
1009         if (!rrpc->trans_map)
1010                 return -ENOMEM;
1011
1012         rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
1013                                                         * rrpc->nr_pages);
1014         if (!rrpc->rev_trans_map)
1015                 return -ENOMEM;
1016
1017         for (i = 0; i < rrpc->nr_pages; i++) {
1018                 struct rrpc_addr *p = &rrpc->trans_map[i];
1019                 struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
1020
1021                 p->addr = ADDR_EMPTY;
1022                 r->addr = ADDR_EMPTY;
1023         }
1024
1025         if (!dev->ops->get_l2p_tbl)
1026                 return 0;
1027
1028         /* Bring up the mapping table from device */
1029         ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
1030                                                         rrpc_l2p_update, rrpc);
1031         if (ret) {
1032                 pr_err("nvm: rrpc: could not read L2P table.\n");
1033                 return -EINVAL;
1034         }
1035
1036         return 0;
1037 }
1038
1039
1040 /* Minimum number of elements kept reserved in the mempools */
1041 #define PAGE_POOL_SIZE 16
1042 #define ADDR_POOL_SIZE 64
1043
1044 static int rrpc_core_init(struct rrpc *rrpc)
1045 {
1046         down_write(&rrpc_lock);
1047         if (!rrpc_gcb_cache) {
1048                 rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
1049                                 sizeof(struct rrpc_block_gc), 0, 0, NULL);
1050                 if (!rrpc_gcb_cache) {
1051                         up_write(&rrpc_lock);
1052                         return -ENOMEM;
1053                 }
1054
1055                 rrpc_rq_cache = kmem_cache_create("rrpc_rq",
1056                                 sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
1057                                 0, 0, NULL);
1058                 if (!rrpc_rq_cache) {
1059                         kmem_cache_destroy(rrpc_gcb_cache);
1060                         up_write(&rrpc_lock);
1061                         return -ENOMEM;
1062                 }
1063         }
1064         up_write(&rrpc_lock);
1065
1066         rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
1067         if (!rrpc->page_pool)
1068                 return -ENOMEM;
1069
1070         rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
1071                                                                 rrpc_gcb_cache);
1072         if (!rrpc->gcb_pool)
1073                 return -ENOMEM;
1074
1075         rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
1076         if (!rrpc->rq_pool)
1077                 return -ENOMEM;
1078
1079         spin_lock_init(&rrpc->inflights.lock);
1080         INIT_LIST_HEAD(&rrpc->inflights.reqs);
1081
1082         return 0;
1083 }
1084
1085 static void rrpc_core_free(struct rrpc *rrpc)
1086 {
1087         mempool_destroy(rrpc->page_pool);
1088         mempool_destroy(rrpc->gcb_pool);
1089         mempool_destroy(rrpc->rq_pool);
1090 }
1091
1092 static void rrpc_luns_free(struct rrpc *rrpc)
1093 {
1094         kfree(rrpc->luns);
1095 }
1096
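/*
 * Build the per-LUN state: one rrpc_lun for every media manager LUN in
 * [lun_begin, lun_end], each with an array of rrpc_blocks mirroring the
 * LUN's nvm_blocks.
 */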
1097 static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
1098 {
1099         struct nvm_dev *dev = rrpc->dev;
1100         struct rrpc_lun *rlun;
1101         int i, j;
1102
1103         spin_lock_init(&rrpc->rev_lock);
1104
1105         rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
1106                                                                 GFP_KERNEL);
1107         if (!rrpc->luns)
1108                 return -ENOMEM;
1109
1110         /* 1:1 mapping */
1111         for (i = 0; i < rrpc->nr_luns; i++) {
1112                 struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
1113
1114                 if (dev->pgs_per_blk >
1115                                 MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
1116                         pr_err("rrpc: number of pages per block too high.");
1117                         goto err;
1118                 }
1119
1120                 rlun = &rrpc->luns[i];
1121                 rlun->rrpc = rrpc;
1122                 rlun->parent = lun;
1123                 INIT_LIST_HEAD(&rlun->prio_list);
1124                 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
1125                 spin_lock_init(&rlun->lock);
1126
1127                 rrpc->total_blocks += dev->blks_per_lun;
1128                 rrpc->nr_pages += dev->sec_per_lun;
1129
1130                 rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
1131                                                 rrpc->dev->blks_per_lun);
1132                 if (!rlun->blocks)
1133                         goto err;
1134
1135                 for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
1136                         struct rrpc_block *rblk = &rlun->blocks[j];
1137                         struct nvm_block *blk = &lun->blocks[j];
1138
1139                         rblk->parent = blk;
1140                         INIT_LIST_HEAD(&rblk->prio);
1141                         spin_lock_init(&rblk->lock);
1142                 }
1143         }
1144
1145         return 0;
1146 err:
1147         return -ENOMEM;
1148 }
1149
1150 static void rrpc_free(struct rrpc *rrpc)
1151 {
1152         rrpc_gc_free(rrpc);
1153         rrpc_map_free(rrpc);
1154         rrpc_core_free(rrpc);
1155         rrpc_luns_free(rrpc);
1156
1157         kfree(rrpc);
1158 }
1159
1160 static void rrpc_exit(void *private)
1161 {
1162         struct rrpc *rrpc = private;
1163
1164         del_timer(&rrpc->gc_timer);
1165
1166         flush_workqueue(rrpc->krqd_wq);
1167         flush_workqueue(rrpc->kgc_wq);
1168
1169         rrpc_free(rrpc);
1170 }
1171
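/*
 * Exposed capacity: reserve four blocks per LUN (the current write block,
 * the GC block and two emergency blocks), then expose 90% of the remaining
 * pages as 512-byte sectors (NR_PHY_IN_LOG per page) to leave headroom for
 * GC. For illustration only: a hypothetical device with 8 LUNs and
 * 256-page blocks would reserve 8 * 256 * 4 = 8192 pages.
 */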
1172 static sector_t rrpc_capacity(void *private)
1173 {
1174         struct rrpc *rrpc = private;
1175         struct nvm_dev *dev = rrpc->dev;
1176         sector_t reserved, provisioned;
1177
1178         /* cur, gc, and two emergency blocks for each lun */
1179         reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
1180         provisioned = rrpc->nr_pages - reserved;
1181
1182         if (reserved > rrpc->nr_pages) {
1183                 pr_err("rrpc: not enough space available to expose storage.\n");
1184                 return 0;
1185         }
1186
1187         sector_div(provisioned, 10);
1188         return provisioned * 9 * NR_PHY_IN_LOG;
1189 }
1190
1191 /*
1192  * Look up the logical address in the reverse translation map and check that it
1193  * is still valid by comparing the forward (logical-to-physical) mapping with
1194  * the physical address. Pages that no longer match are marked invalid.
1195  */
1196 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
1197 {
1198         struct nvm_dev *dev = rrpc->dev;
1199         int offset;
1200         struct rrpc_addr *laddr;
1201         u64 paddr, pladdr;
1202
1203         for (offset = 0; offset < dev->pgs_per_blk; offset++) {
1204                 paddr = block_to_addr(rrpc, rblk) + offset;
1205
1206                 pladdr = rrpc->rev_trans_map[paddr].addr;
1207                 if (pladdr == ADDR_EMPTY)
1208                         continue;
1209
1210                 laddr = &rrpc->trans_map[pladdr];
1211
1212                 if (paddr == laddr->addr) {
1213                         laddr->rblk = rblk;
1214                 } else {
1215                         set_bit(offset, rblk->invalid_pages);
1216                         rblk->nr_invalid_pages++;
1217                 }
1218         }
1219 }
1220
1221 static int rrpc_blocks_init(struct rrpc *rrpc)
1222 {
1223         struct rrpc_lun *rlun;
1224         struct rrpc_block *rblk;
1225         int lun_iter, blk_iter;
1226
1227         for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
1228                 rlun = &rrpc->luns[lun_iter];
1229
1230                 for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
1231                                                                 blk_iter++) {
1232                         rblk = &rlun->blocks[blk_iter];
1233                         rrpc_block_map_update(rrpc, rblk);
1234                 }
1235         }
1236
1237         return 0;
1238 }
1239
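/* Reserve an initial write block and an emergency GC block for every LUN */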
1240 static int rrpc_luns_configure(struct rrpc *rrpc)
1241 {
1242         struct rrpc_lun *rlun;
1243         struct rrpc_block *rblk;
1244         int i;
1245
1246         for (i = 0; i < rrpc->nr_luns; i++) {
1247                 rlun = &rrpc->luns[i];
1248
1249                 rblk = rrpc_get_blk(rrpc, rlun, 0);
1250                 if (!rblk)
1251                         goto err;
1252
1253                 rrpc_set_lun_cur(rlun, rblk);
1254
1255                 /* Emergency gc block */
1256                 rblk = rrpc_get_blk(rrpc, rlun, 1);
1257                 if (!rblk)
1258                         goto err;
1259                 rlun->gc_cur = rblk;
1260         }
1261
1262         return 0;
1263 err:
1264         rrpc_put_blks(rrpc);
1265         return -EINVAL;
1266 }
1267
1268 static struct nvm_tgt_type tt_rrpc;
1269
1270 static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
1271                                                 int lun_begin, int lun_end)
1272 {
1273         struct request_queue *bqueue = dev->q;
1274         struct request_queue *tqueue = tdisk->queue;
1275         struct rrpc *rrpc;
1276         int ret;
1277
1278         if (!(dev->identity.dom & NVM_RSP_L2P)) {
1279                 pr_err("nvm: rrpc: device does not support l2p (%x)\n",
1280                                                         dev->identity.dom);
1281                 return ERR_PTR(-EINVAL);
1282         }
1283
1284         rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
1285         if (!rrpc)
1286                 return ERR_PTR(-ENOMEM);
1287
1288         rrpc->instance.tt = &tt_rrpc;
1289         rrpc->dev = dev;
1290         rrpc->disk = tdisk;
1291
1292         bio_list_init(&rrpc->requeue_bios);
1293         spin_lock_init(&rrpc->bio_lock);
1294         INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
1295
1296         rrpc->nr_luns = lun_end - lun_begin + 1;
1297
1298         /* simple round-robin strategy */
1299         atomic_set(&rrpc->next_lun, -1);
1300
1301         ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
1302         if (ret) {
1303                 pr_err("nvm: rrpc: could not initialize luns\n");
1304                 goto err;
1305         }
1306
1307         rrpc->poffset = dev->sec_per_lun * lun_begin;
1308         rrpc->lun_offset = lun_begin;
1309
1310         ret = rrpc_core_init(rrpc);
1311         if (ret) {
1312                 pr_err("nvm: rrpc: could not initialize core\n");
1313                 goto err;
1314         }
1315
1316         ret = rrpc_map_init(rrpc);
1317         if (ret) {
1318                 pr_err("nvm: rrpc: could not initialize maps\n");
1319                 goto err;
1320         }
1321
1322         ret = rrpc_blocks_init(rrpc);
1323         if (ret) {
1324                 pr_err("nvm: rrpc: could not initialize state for blocks\n");
1325                 goto err;
1326         }
1327
1328         ret = rrpc_luns_configure(rrpc);
1329         if (ret) {
1330                 pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
1331                 goto err;
1332         }
1333
1334         ret = rrpc_gc_init(rrpc);
1335         if (ret) {
1336                 pr_err("nvm: rrpc: could not initialize gc\n");
1337                 goto err;
1338         }
1339
1340         /* inherit the size from the underlying device */
1341         blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
1342         blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
1343
1344         pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
1345                         rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
1346
1347         mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
1348
1349         return rrpc;
1350 err:
1351         rrpc_free(rrpc);
1352         return ERR_PTR(ret);
1353 }
1354
1355 /* round robin, page-based FTL, and cost-based GC */
1356 static struct nvm_tgt_type tt_rrpc = {
1357         .name           = "rrpc",
1358         .version        = {1, 0, 0},
1359
1360         .make_rq        = rrpc_make_rq,
1361         .capacity       = rrpc_capacity,
1362         .end_io         = rrpc_end_io,
1363
1364         .init           = rrpc_init,
1365         .exit           = rrpc_exit,
1366 };
1367
1368 static int __init rrpc_module_init(void)
1369 {
1370         return nvm_register_target(&tt_rrpc);
1371 }
1372
1373 static void rrpc_module_exit(void)
1374 {
1375         nvm_unregister_target(&tt_rrpc);
1376 }
1377
1378 module_init(rrpc_module_init);
1379 module_exit(rrpc_module_exit);
1380 MODULE_LICENSE("GPL v2");
1381 MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");