/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include "nvme.h"

#ifdef CONFIG_NVM

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
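
/*
 * Vendor-specific command set. Each command below mirrors the standard
 * 64-byte NVMe submission queue entry; the on-wire sizes are asserted
 * in _nvme_nvm_check_size().
 */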
struct nvme_nvm_hb_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			slba;
};

struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le32			chnl_off;
	__u32			rsvd11[5];
};

struct nvme_nvm_l2ptbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le32			cdw2[4];
	__le64			prp1;
	__le64			prp2;
	__le64			slba;
	__le32			nlb;
	__le16			cdw14[6];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};
struct nvme_nvm_id_group {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_blk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[906];
} __packed;
struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[116];	/* pads to the 128 bytes asserted below */
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[116];
	struct nvme_nvm_id_group groups[4];
} __packed;
struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[120];	/* pads the header to the 512 bytes asserted below */
	__u8	blk[0];
};
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
}
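
/*
 * Copy the per-group geometry from the little-endian identify page
 * into the host-endian nvm_id, capped at the four groups the page
 * can describe.
 */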
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;
	int i, end;

	end = min_t(u32, 4, nvm_id->cgrps);

	for (i = 0; i < end; i++) {
		src = &nvme_nvm_id->groups[i];
		dst = &nvm_id->groups[i];

		dst->mtype = src->mtype;
		dst->fmtype = src->fmtype;
		dst->num_ch = src->num_ch;
		dst->num_lun = src->num_lun;
		dst->num_pln = src->num_pln;

		dst->num_pg = le16_to_cpu(src->num_pg);
		dst->num_blk = le16_to_cpu(src->num_blk);
		dst->fpg_sz = le16_to_cpu(src->fpg_sz);
		dst->csecs = le16_to_cpu(src->csecs);
		dst->sos = le16_to_cpu(src->sos);

		dst->trdt = le32_to_cpu(src->trdt);
		dst->trdm = le32_to_cpu(src->trdm);
		dst->tprt = le32_to_cpu(src->tprt);
		dst->tprm = le32_to_cpu(src->tprm);
		dst->tbet = le32_to_cpu(src->tbet);
		dst->tbem = le32_to_cpu(src->tbem);
		dst->mpos = le32_to_cpu(src->mpos);
		dst->mccap = le32_to_cpu(src->mccap);

		dst->cpar = le16_to_cpu(src->cpar);
	}

	return 0;
}
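
/*
 * Issue the LightNVM identity admin command and translate the result
 * into the generic nvm_id consumed by the LightNVM core.
 */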
static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cgrps = nvme_nvm_id->cgrps;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvme_nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}
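
/*
 * Fetch the logical-to-physical table in chunks. Each admin command
 * transfers at most queue_max_hw_sectors() worth of 64-bit entries;
 * the update_l2p callback consumes each chunk as it arrives.
 */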
static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(dev->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
									ret);
			ret = -EIO;
			goto out;
		}

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}
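
/*
 * Read the bad block table for the LUN addressed by @ppa and validate
 * its "BBLT" signature, version and block count before handing the
 * per-block states to the update_bbtbl callback.
 */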
static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
				void *priv)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(dev->dev, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(dev->dev, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
		ret = -EINVAL;
		dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)",
					le32_to_cpu(bb_tbl->tblks), nr_blocks);
		goto out;
	}

	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
	if (ret) {
		ret = -EINTR;
		goto out;
	}

out:
	kfree(bb_tbl);
	return ret;
}
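
/*
 * Mark a range of blocks with the given bad block state. The block
 * count (nlb) is encoded 0-based, following NVMe convention.
 */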
static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
								int type)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);

	return ret;
}
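
/*
 * Translate an nvm_rq into the vendor read/write command. Hybrid
 * commands additionally carry the logical slba alongside the physical
 * address.
 */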
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
						rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	if (dev->mt->end_io(rqd, error))
		pr_err("nvme: err status: %x result: %lx\n",
				rq->errors, (unsigned long)rq->special);

	kfree(rq->cmd);
	blk_mq_free_request(rq);
}
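
/*
 * Wrap the nvm_rq in a driver-private block layer request and execute
 * it asynchronously; nvme_nvm_end_io() completes it back to the media
 * manager.
 */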
static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = (void *)0;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
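
/* Erase is submitted synchronously on the namespace queue. */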
static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
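
/* DMA pool for the PPA lists that accompany vectored I/O commands. */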
static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;

	return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
				  dma_addr_t dma_handler)
{
	dma_pool_free(pool, ppa_list, dma_handler);
}
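
/* Ops table handed to the LightNVM core via nvm_register(). */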
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
	nvm_unregister(disk_name);
}
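
/*
 * Known LightNVM-capable controllers are detected by PCI ID plus a
 * vendor-specific bit in the namespace identify data.
 */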
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_dev *dev = ns->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}
#else /* CONFIG_NVM */
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}
void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */