/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
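
/*
 * Device-specific admin opcodes used for the LightNVM identify, L2P table
 * and get/set bad block table commands.
 */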
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
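
/*
 * On-wire command layouts. Each is a 64-byte NVMe command and is overlaid
 * on struct nvme_command through the union in struct nvme_nvm_command
 * below; sizes are asserted in _nvme_nvm_check_size().
 */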
struct nvme_nvm_hb_rw {

struct nvme_nvm_ph_rw {

struct nvme_nvm_identity {

struct nvme_nvm_l2ptbl {

struct nvme_nvm_getbbtbl {

struct nvme_nvm_setbbtbl {

struct nvme_nvm_erase_blk {
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};
struct nvme_nvm_lp_mlc {

struct nvme_nvm_lp_tbl {
	struct nvme_nvm_lp_mlc mlc;

struct nvme_nvm_id_group {
	struct nvme_nvm_lp_tbl lptbl;

struct nvme_nvm_addr_format {

	struct nvme_nvm_addr_format ppaf;
	struct nvme_nvm_id_group groups[4];

struct nvme_nvm_bb_tbl {
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
}
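
/*
 * Copy the per-group geometry and timing fields reported by the device
 * into the generic nvm_id groups, converting little-endian fields to
 * host byte order.
 */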
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;
	int i, end;

	end = min_t(u32, 4, nvm_id->cgrps);

	for (i = 0; i < end; i++) {
		src = &nvme_nvm_id->groups[i];
		dst = &nvm_id->groups[i];

		dst->mtype = src->mtype;
		dst->fmtype = src->fmtype;
		dst->num_ch = src->num_ch;
		dst->num_lun = src->num_lun;
		dst->num_pln = src->num_pln;

		dst->num_pg = le16_to_cpu(src->num_pg);
		dst->num_blk = le16_to_cpu(src->num_blk);
		dst->fpg_sz = le16_to_cpu(src->fpg_sz);
		dst->csecs = le16_to_cpu(src->csecs);
		dst->sos = le16_to_cpu(src->sos);

		dst->trdt = le32_to_cpu(src->trdt);
		dst->trdm = le32_to_cpu(src->trdm);
		dst->tprt = le32_to_cpu(src->tprt);
		dst->tprm = le32_to_cpu(src->tprm);
		dst->tbet = le32_to_cpu(src->tbet);
		dst->tbem = le32_to_cpu(src->tbem);
		dst->mpos = le32_to_cpu(src->mpos);
		dst->mccap = le32_to_cpu(src->mccap);

		dst->cpar = le16_to_cpu(src->cpar);

		if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
			memcpy(dst->lptbl.id, src->lptbl.id, 8);
			dst->lptbl.mlc.num_pairs =
					le16_to_cpu(src->lptbl.mlc.num_pairs);
			/* 4 bits per pair */
			memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
					dst->lptbl.mlc.num_pairs >> 1);
		}
	}

	return 0;
}
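
/*
 * Issue the device-specific identity admin command and fill out the
 * generic nvm_id from the returned identify page.
 */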
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cgrps = nvme_nvm_id->cgrps;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
				sizeof(struct nvme_nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}
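
/*
 * Read the logical-to-physical translation table. The table is fetched in
 * chunks bounded by the admin queue's maximum transfer size and handed to
 * the update_l2p() callback one chunk at a time.
 */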
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);
		ret = nvme_submit_sync_cmd(dev->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
									ret);
			ret = -EIO;
			goto out;
		}

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}
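
/*
 * Retrieve the bad block table for the LUN addressed by @ppa, validate its
 * table id, version and block count, then hand the per-block states to the
 * update_bbtbl() callback.
 */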
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
				void *priv)
{
	struct request_queue *q = nvmdev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);
	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;
	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}
	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(dev->dev, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}
	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(dev->dev, "bbt version not supported\n");
		goto out;
	}
	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
		ret = -EINVAL;
		dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)",
				le32_to_cpu(bb_tbl->tblks), nr_blocks);
		goto out;
	}

	ppa = dev_to_generic_addr(nvmdev, ppa);
	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
out:
	kfree(bb_tbl);
	return ret;
}
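
/*
 * Update the device-side bad block table: mark the blocks starting at
 * rqd->ppa_addr with the given block state type.
 */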
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
								int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
	return ret;
}
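
/*
 * Translate an nvm_rq into an on-wire command. Hybrid (HB) read/write
 * commands additionally carry the logical slba taken from the bio.
 */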
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
						rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	nvm_end_io(rqd, error);

	kfree(rq->cmd);
	blk_mq_free_request(rq);
}
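
/*
 * Build a driver-private block layer request around the nvm_rq's bio and
 * submit it asynchronously; completion is handled by nvme_nvm_end_io().
 */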
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = (void *)0;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
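
/* Issue a synchronous erase for the blocks addressed by rqd->ppa_addr. */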
static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
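
/*
 * DMA pool helpers exposed to the LightNVM core for per-command DMA-able
 * allocations such as PPA lists.
 */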
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_dev *dev = ns->dev;

	return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
				dma_addr_t dma_handler)
{
	dma_pool_free(pool, ppa_list, dma_handler);
}
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,
};
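
/*
 * Entry points used by the NVMe driver to attach and detach a namespace's
 * request queue from the LightNVM subsystem.
 */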
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
	nvm_unregister(disk_name);
}
/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
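
/*
 * A namespace is treated as an open-channel (LightNVM) device when it sits
 * behind a known controller (CNEX Labs hardware or the QEMU NVMe simulator)
 * and the controller sets the vendor-specific identify bit.
 */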
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_dev *dev = ns->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_WL &&