lightnvm: refactor spin_unlock in gennvm_get_blk
drivers/lightnvm/gennvm.c
/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

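/*
 * Free the per-LUN block arrays allocated by gennvm_blocks_init(). The
 * loop stops at the first LUN without an allocation, which also covers
 * the partially initialized error path.
 */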
static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}

static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}

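/*
 * LUNs live in one flat array in channel-major order: the LUN with
 * channel id c and in-channel id l sits at index c * luns_per_chnl + l.
 * For example, with 4 LUNs per channel, channel 1/LUN 2 is gn->luns[6].
 * Every LUN lookup in this file must use the same formula.
 */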
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */
		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_inuse_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}
	return 0;
}

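/*
 * Callback for dev->ops->get_bb_tbl(). blks carries one state byte per
 * block in the LUN addressed by ppa; a non-zero byte marks the block
 * bad, so it is moved onto the LUN's bb_list.
 */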
static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
								void *private)
{
	struct gen_nvm *gn = private;
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	if (unlikely(nr_blocks > dev->blks_per_lun)) {
		pr_err("gennvm: BB data is out of bounds.\n");
		return -EINVAL;
	}

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blocks; i++) {
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
	}

	return 0;
}

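/*
 * Callback for dev->ops->get_l2p_tbl(). Walks nlb L2P entries starting
 * at slba and marks each block that backs a mapped sector as in use, so
 * blocks written before a reboot are not handed out again as free.
 */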
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_pages)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->type) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->type = 1;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_inuse_blocks++;
		}
	}

	return 0;
}

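/*
 * Allocate and populate the block array of each LUN. All blocks start
 * out on the free list; the device's bad block and L2P tables, when
 * available, then move blocks to the bb and used lists respectively.
 */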
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks)
			return -ENOMEM;

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
				lun->vlun.nr_free_blocks--;
				continue;
			}

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.lun_id;
			ppa = generic_to_dev_addr(dev, ppa);

			ret = dev->ops->get_bb_tbl(dev, ppa,
						dev->blks_per_lun,
						gennvm_block_bb, gn);
			if (ret)
				pr_err("gennvm: could not read BB table\n");
		}
	}

	if (dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table\n");
			pr_warn("gennvm: falling back to default block initialization\n");
		}
	}

	return 0;
}

static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}

static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn)
		return -ENOMEM;

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gennvm_free(dev);
	return ret;
}

static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
}

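/*
 * Hand out the first free block of the LUN. Returns NULL if the free
 * list is empty or if a non-GC allocation would eat into the blocks
 * reserved for garbage collection.
 */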
static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u has no free blocks available\n",
								lun->vlun.id);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
		goto out;

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->type = 1;

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_inuse_blocks++;

out:
	spin_unlock(&vlun->lock);
	return blk;
}

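/*
 * Return a block to its LUN. In-use blocks (type 1) go back on the free
 * list; blocks marked bad (type 2) are retired to the bb_list. Unknown
 * types are treated as bad, as their accounting can no longer be
 * trusted.
 */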
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	spin_lock(&vlun->lock);

	switch (blk->type) {
	case 1: /* in use */
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_free_blocks++;
		lun->vlun.nr_inuse_blocks--;
		blk->type = 0;
		break;
	case 2: /* bad */
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
		break;
	default:
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->type);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
	}

	spin_unlock(&vlun->lock);
}

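/*
 * Translate all PPAs in a request between the generic address format
 * used by the media manager and the device's native format, for both
 * single- and multi-page requests.
 */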
static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}

static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}

static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return 0;

	/* Convert address space */
	gennvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	return dev->ops->submit_io(dev->q, rqd);
}

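/*
 * Tag the block addressed by *ppa with the given type. The ppa is
 * expected in generic address format; callers holding device addresses
 * convert first (see gennvm_mark_blk_bad()).
 */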
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
								int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	if (unlikely(ppa->g.ch >= dev->nr_chnls ||
					ppa->g.lun >= dev->luns_per_chnl ||
					ppa->g.blk >= dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gennvm: ppa broken (ch: %u >= %u lun: %u >= %u blk: %u >= %u)\n",
				ppa->g.ch, dev->nr_chnls,
				ppa->g.lun, dev->luns_per_chnl,
				ppa->g.blk, dev->blks_per_lun);
		return;
	}

	lun = &gn->luns[(ppa->g.ch * dev->luns_per_chnl) + ppa->g.lun];
	blk = &lun->vlun.blocks[ppa->g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->type = type;
}

/* Mark a block bad. The target is expected to recover from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (!dev->ops->set_bb_tbl)
		return;

	if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
		return;

	gennvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_pages > 1)
		for (i = 0; i < rqd->nr_pages; i++)
			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
	else
		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
}

static int gennvm_end_io(struct nvm_rq *rqd, int error)
{
	struct nvm_tgt_instance *ins = rqd->ins;
	int ret = 0;

	switch (error) {
	case NVM_RSP_SUCCESS:
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
		/* fall through: a failed write still counts as an error */
	default:
		ret++;
	}

	ret += ins->tt->end_io(rqd, error);

	return ret;
}

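/*
 * Erase a block on the device. On multi-plane devices the erase must
 * address every plane, so a PPA list with one entry per plane is built
 * in DMA-able memory before the command is issued.
 */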
static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	int plane_cnt = 0, pl_idx, ret;
	struct ppa_addr addr;
	struct nvm_rq rqd;

	if (!dev->ops->erase_block)
		return 0;

	addr = block_to_ppa(dev, blk);

	if (dev->plane_mode == NVM_PLANE_SINGLE) {
		rqd.nr_pages = 1;
		rqd.ppa_addr = addr;
	} else {
		plane_cnt = (1 << dev->plane_mode);
		rqd.nr_pages = plane_cnt;

		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
							&rqd.dma_ppa_list);
		if (!rqd.ppa_list) {
			pr_err("gennvm: failed to allocate dma memory\n");
			return -ENOMEM;
		}

		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			addr.g.pl = pl_idx;
			rqd.ppa_list[pl_idx] = addr;
		}
	}

	gennvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev->q, &rqd);

	if (plane_cnt)
		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);

	return ret;
}

static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	return &gn->luns[lunid].vlun;
}

static void gennvm_lun_info_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_inuse_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}

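/* Media manager entry points registered with the lightnvm core. */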
static struct nvmm_type gennvm = {
	.name		= "gennvm",
	.version	= {0, 1, 0},

	.register_mgr	= gennvm_register,
	.unregister_mgr	= gennvm_unregister,

	.get_blk	= gennvm_get_blk,
	.put_blk	= gennvm_put_blk,

	.submit_io	= gennvm_submit_io,
	.end_io		= gennvm_end_io,
	.erase_blk	= gennvm_erase_blk,

	.get_lun	= gennvm_get_lun,
	.lun_info_print	= gennvm_lun_info_print,
};

static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}

static void gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");