// SPDX-License-Identifier: GPL-2.0
/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

enum nvme_nvm_log_page {
        NVME_NVM_LOG_REPORT_CHUNK       = 0xca,
};

struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __u32                   rsvd11[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_erase_blk erase;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
        };
};

struct nvme_nvm_id12_grp {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_chk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[906];
} __packed;

struct nvme_nvm_id12_addrf {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sec_offset;
        __u8                    sec_len;
        __u8                    res[4];
} __packed;

struct nvme_nvm_id12 {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_id12_addrf ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id12_grp grp;
        __u8                    resv2[2880];
} __packed;

struct nvme_nvm_bb_tbl {
        __u8    tblid[4];
        __le16  verid;
        __le16  revid;
        __le32  rvsd1;
        __le32  tblks;
        __le32  tfact;
        __le32  tgrown;
        __le32  tdresv;
        __le32  thresv;
        __le32  rsvd2[8];
        __u8    blk[0];
};

struct nvme_nvm_id20_addrf {
        __u8                    grp_len;
        __u8                    pu_len;
        __u8                    chk_len;
        __u8                    lba_len;
        __u8                    resv[4];
};

struct nvme_nvm_id20 {
        __u8                    mjr;
        __u8                    mnr;
        __u8                    resv[6];

        struct nvme_nvm_id20_addrf lbaf;

        __le32                  mccap;
        __u8                    resv2[12];

        __u8                    wit;
        __u8                    resv3[31];

        /* Geometry */
        __le16                  num_grp;
        __le16                  num_pu;
        __le32                  num_chk;
        __le32                  clba;
        __u8                    resv4[52];

        /* Write data requirements */
        __le32                  ws_min;
        __le32                  ws_opt;
        __le32                  mw_cunits;
        __le32                  maxoc;
        __le32                  maxocpu;
        __u8                    resv5[44];

        /* Performance related metrics */
        __le32                  trdt;
        __le32                  trdm;
        __le32                  twrt;
        __le32                  twrm;
        __le32                  tcrst;
        __le32                  tcrsm;
        __u8                    resv6[40];

        /* Reserved area */
        __u8                    resv7[2816];

        /* Vendor specific */
        __u8                    vs[1024];
};

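/*
 * On-the-wire chunk descriptor from the 2.0 report-chunk log page. It must
 * match the size of struct nvm_chk_meta (see _nvme_nvm_check_size()).
 */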
struct nvme_nvm_chk_meta {
        __u8    state;
        __u8    type;
        __u8    wi;
        __u8    rsvd[5];
        __le64  slba;
        __le64  cnlb;
        __le64  wp;
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
                                                sizeof(struct nvm_chk_meta));
}

static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
                                 struct nvme_nvm_id12_addrf *src)
{
        dst->ch_len = src->ch_len;
        dst->lun_len = src->lun_len;
        dst->blk_len = src->blk_len;
        dst->pg_len = src->pg_len;
        dst->pln_len = src->pln_len;
        dst->sec_len = src->sec_len;

        dst->ch_offset = src->ch_offset;
        dst->lun_offset = src->lun_offset;
        dst->blk_offset = src->blk_offset;
        dst->pg_offset = src->pg_offset;
        dst->pln_offset = src->pln_offset;
        dst->sec_offset = src->sec_offset;

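        /*
         * Precompute an extraction mask for each address field:
         * ((1 << len) - 1) shifted to the field's offset within the PPA.
         */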
        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
        dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
        dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
                             struct nvm_geo *geo)
{
        struct nvme_nvm_id12_grp *src;
        int sec_per_pg, sec_per_pl, pg_per_blk;

        if (id->cgrps != 1)
                return -EINVAL;

        src = &id->grp;

        if (src->mtype != 0) {
                pr_err("nvm: memory type not supported\n");
                return -EINVAL;
        }

        /* 1.2 spec. only reports a single version id - unfold */
        geo->major_ver_id = id->ver_id;
        geo->minor_ver_id = 2;

        /* Set compacted version for upper layers */
        geo->version = NVM_OCSSD_SPEC_12;

        geo->num_ch = src->num_ch;
        geo->num_lun = src->num_lun;
        geo->all_luns = geo->num_ch * geo->num_lun;

        geo->num_chk = le16_to_cpu(src->num_chk);

        geo->csecs = le16_to_cpu(src->csecs);
        geo->sos = le16_to_cpu(src->sos);

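        /* Sectors per chunk = sectors per page * planes * pages per block. */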
        pg_per_blk = le16_to_cpu(src->num_pg);
        sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
        sec_per_pl = sec_per_pg * src->num_pln;
        geo->clba = sec_per_pl * pg_per_blk;

        geo->all_chunks = geo->all_luns * geo->num_chk;
        geo->total_secs = geo->clba * geo->all_chunks;

        geo->ws_min = sec_per_pg;
        geo->ws_opt = sec_per_pg;
        geo->mw_cunits = geo->ws_opt << 3;      /* default to MLC safe values */

        /* Do not impose values for the maximum number of open blocks, as it
         * is unspecified in 1.2. Users of 1.2 must be aware of this and, if
         * restrictions apply, specify these values through a quirk.
         */
        geo->maxoc = geo->all_luns * geo->num_chk;
        geo->maxocpu = geo->num_chk;

        geo->mccap = le32_to_cpu(src->mccap);

        geo->trdt = le32_to_cpu(src->trdt);
        geo->trdm = le32_to_cpu(src->trdm);
        geo->tprt = le32_to_cpu(src->tprt);
        geo->tprm = le32_to_cpu(src->tprm);
        geo->tbet = le32_to_cpu(src->tbet);
        geo->tbem = le32_to_cpu(src->tbem);

        /* 1.2 compatibility */
        geo->vmnt = id->vmnt;
        geo->cap = le32_to_cpu(id->cap);
        geo->dom = le32_to_cpu(id->dom);

        geo->mtype = src->mtype;
        geo->fmtype = src->fmtype;

        geo->cpar = le16_to_cpu(src->cpar);
        geo->mpos = le32_to_cpu(src->mpos);

        geo->pln_mode = NVM_PLANE_SINGLE;

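        /*
         * mpos advertises the supported plane modes for read, program and
         * erase in separate bytes; 0x020202 tests the dual-plane bit in each
         * byte and 0x040404 the quad-plane bit. Scale ws_opt accordingly.
         */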
        if (geo->mpos & 0x020202) {
                geo->pln_mode = NVM_PLANE_DOUBLE;
                geo->ws_opt <<= 1;
        } else if (geo->mpos & 0x040404) {
                geo->pln_mode = NVM_PLANE_QUAD;
                geo->ws_opt <<= 2;
        }

        geo->num_pln = src->num_pln;
        geo->num_pg = le16_to_cpu(src->num_pg);
        geo->fpg_sz = le16_to_cpu(src->fpg_sz);

        nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);

        return 0;
}

static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
                                 struct nvme_nvm_id20_addrf *src)
{
        dst->ch_len = src->grp_len;
        dst->lun_len = src->pu_len;
        dst->chk_len = src->chk_len;
        dst->sec_len = src->lba_len;

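        /*
         * The 2.0 LBA format packs the fields contiguously from the least
         * significant bit: sector, chunk, parallel unit, then group.
         */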
        dst->sec_offset = 0;
        dst->chk_offset = dst->sec_len;
        dst->lun_offset = dst->chk_offset + dst->chk_len;
        dst->ch_offset = dst->lun_offset + dst->lun_len;

        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
                             struct nvm_geo *geo)
{
        geo->major_ver_id = id->mjr;
        geo->minor_ver_id = id->mnr;

        /* Set compacted version for upper layers */
        geo->version = NVM_OCSSD_SPEC_20;

        geo->num_ch = le16_to_cpu(id->num_grp);
        geo->num_lun = le16_to_cpu(id->num_pu);
        geo->all_luns = geo->num_ch * geo->num_lun;

        geo->num_chk = le32_to_cpu(id->num_chk);
        geo->clba = le32_to_cpu(id->clba);

        geo->all_chunks = geo->all_luns * geo->num_chk;
        geo->total_secs = geo->clba * geo->all_chunks;

        geo->ws_min = le32_to_cpu(id->ws_min);
        geo->ws_opt = le32_to_cpu(id->ws_opt);
        geo->mw_cunits = le32_to_cpu(id->mw_cunits);
        geo->maxoc = le32_to_cpu(id->maxoc);
        geo->maxocpu = le32_to_cpu(id->maxocpu);

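        /*
         * 2.0 write/reset timings are carried in the 1.2-named geometry
         * fields (tprt/tprm for program, tbet/tbem for erase/reset).
         */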
        geo->trdt = le32_to_cpu(id->trdt);
        geo->trdm = le32_to_cpu(id->trdm);
        geo->tprt = le32_to_cpu(id->twrt);
        geo->tprm = le32_to_cpu(id->twrm);
        geo->tbet = le32_to_cpu(id->tcrst);
        geo->tbem = le32_to_cpu(id->tcrsm);

        nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);

        return 0;
}

static int nvme_nvm_identity(struct nvm_dev *nvmdev)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id12 *id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->head->ns_id);

        id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
        if (!id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                id, sizeof(struct nvme_nvm_id12));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        /*
         * The 1.2 and 2.0 specifications share the first byte in their geometry
         * command to make it possible to know what version a device implements.
         */
        switch (id->ver_id) {
        case 1:
                ret = nvme_nvm_setup_12(id, &nvmdev->geo);
                break;
        case 2:
                ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
                                                        &nvmdev->geo);
                break;
        default:
                dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
                                                        id->ver_id);
                ret = -EINVAL;
        }

out:
        kfree(id);
        return ret;
}

static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
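        /* The table carries one status byte per block across all planes. */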
        int nr_blks = geo->num_chk * geo->num_pln;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->device, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                ret = -EINVAL;
                dev_err(ctrl->device,
                        "bbt unexpected blocks returned (%u!=%u)",
                        le32_to_cpu(bb_tbl->tblks), nr_blks);
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
out:
        kfree(bb_tbl);
        return ret;
}

static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}

/*
 * Expect the lba in device format
 */
static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
                                 sector_t slba, int nchks,
                                 struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &ndev->geo;
        struct nvme_ns *ns = ndev->q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
        struct ppa_addr ppa;
        size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
        size_t log_pos, offset, len;
        int i, max_len;
        int ret = 0;

        /*
         * Limit requests to a maximum of 256K to avoid issuing arbitrarily
         * large requests when the device does not specify a maximum transfer
         * size.
         */
        max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);

        dev_meta = kmalloc(max_len, GFP_KERNEL);
        if (!dev_meta)
                return -ENOMEM;

        /* Normalize lba address space to obtain log offset */
        ppa.ppa = slba;
        ppa = dev_to_generic_addr(ndev, ppa);

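        /*
         * The report-chunk log is ordered by group, then parallel unit, then
         * chunk; linearize the generic address into a log entry index.
         */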
        log_pos = ppa.m.chk;
        log_pos += ppa.m.pu * geo->num_chk;
        log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;

        offset = log_pos * sizeof(struct nvme_nvm_chk_meta);

        while (left) {
                len = min_t(unsigned int, left, max_len);

                memset(dev_meta, 0, max_len);
                dev_meta_off = dev_meta;

                ret = nvme_get_log(ctrl, ns->head->ns_id,
                                NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
                                offset);
                if (ret) {
                        dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
                        break;
                }

                for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
                        meta->state = dev_meta_off->state;
                        meta->type = dev_meta_off->type;
                        meta->wi = dev_meta_off->wi;
                        meta->slba = le64_to_cpu(dev_meta_off->slba);
                        meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
                        meta->wp = le64_to_cpu(dev_meta_off->wp);

                        meta++;
                        dev_meta_off++;
                }

                offset += len;
                left -= len;
        }

        kfree(dev_meta);

        return ret;
}

static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}

static struct request *nvme_nvm_alloc_request(struct request_queue *q,
                                              struct nvm_rq *rqd,
                                              struct nvme_nvm_command *cmd)
{
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
        if (IS_ERR(rq))
                return rq;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (rqd->bio)
                blk_rq_append_bio(rq, &rqd->bio);
        else
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        return rq;
}

static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_nvm_command *cmd;
        struct request *rq;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        rq = nvme_nvm_alloc_request(q, rqd, cmd);
        if (IS_ERR(rq)) {
                kfree(cmd);
                return PTR_ERR(rq);
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}

static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct nvme_nvm_command cmd;
        int ret = 0;

        memset(&cmd, 0, sizeof(struct nvme_nvm_command));

        rq = nvme_nvm_alloc_request(q, rqd, &cmd);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* I/Os can fail and the error is signaled through rqd. Callers must
         * handle the error accordingly.
         */
        blk_execute_rq(q, NULL, rq, 0);
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;

        blk_mq_free_request(rq);

        return ret;
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
                                        int size)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .get_chk_meta           = nvme_nvm_get_chk_meta,

        .submit_io              = nvme_nvm_submit_io,
        .submit_io_sync         = nvme_nvm_submit_io_sync,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,
};

static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct gendisk *disk = ns->disk;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
                        NVME_QID_ANY);
        if (IS_ERR(rq)) {
                ret = -ENOMEM;
                goto err_cmd;
        }

        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

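        /*
         * A PPA vector is copied into a DMA-able list and referenced through
         * spba; a single PPA is passed by value directly in the spba field.
         */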
        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                                &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                (void __user *)meta_buf,
                                                meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                bio->bi_disk = disk;
        }

        blk_execute_rq(q, NULL, rq, 0);

        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }
err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio)
                blk_rq_unmap_user(bio);
err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
err_cmd:
        return ret;
}

static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;
        if (vio.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
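        /* Only the get-bad-block-table opcode (0xf2) is allowed without
         * CAP_SYS_ADMIN.
         */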
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;
        if (vcmd.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        /* cdw11-12 */
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control  = cpu_to_le16(vcmd.control);
        c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
        c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
        c.common.cdw15 = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}

int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;
        struct nvm_geo *geo;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        /* Note that csecs and sos will be overridden if it is a 1.2 drive. */
        geo = &dev->geo;
        geo->csecs = 1 << ns->lba_shift;
        geo->sos = ns->ms;
        geo->ext = ns->ext;
        geo->mdts = ns->ctrl->max_hw_sectors;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}

static ssize_t nvm_dev_attr_show(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                if (geo->major_ver_id == 1)
                        return scnprintf(page, PAGE_SIZE, "%u\n",
                                                geo->major_ver_id);
                else
                        return scnprintf(page, PAGE_SIZE, "%u.%u\n",
                                                geo->major_ver_id,
                                                geo->minor_ver_id);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
        } else {
                return scnprintf(page,
                                 PAGE_SIZE,
                                 "Unhandled attr(%s) in `%s`\n",
                                 attr->name, __func__);
        }
}

static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
{
        return scnprintf(page, PAGE_SIZE,
                "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                                ppaf->ch_offset, ppaf->ch_len,
                                ppaf->lun_offset, ppaf->lun_len,
                                ppaf->pln_offset, ppaf->pln_len,
                                ppaf->blk_offset, ppaf->blk_len,
                                ppaf->pg_offset, ppaf->pg_len,
                                ppaf->sec_offset, ppaf->sec_len);
}

static ssize_t nvm_dev_attr_show_12(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
        } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
        } else {
                return scnprintf(page, PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

static ssize_t nvm_dev_attr_show_20(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "groups") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
        } else if (strcmp(attr->name, "punits") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
        } else if (strcmp(attr->name, "chunks") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
        } else if (strcmp(attr->name, "clba") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
        } else if (strcmp(attr->name, "ws_min") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
        } else if (strcmp(attr->name, "ws_opt") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
        } else if (strcmp(attr->name, "maxoc") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
        } else if (strcmp(attr->name, "maxocpu") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
        } else if (strcmp(attr->name, "mw_cunits") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
        } else if (strcmp(attr->name, "write_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
        } else if (strcmp(attr->name, "write_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
        } else if (strcmp(attr->name, "reset_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
        } else if (strcmp(attr->name, "reset_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
        } else {
                return scnprintf(page, PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

#define NVM_DEV_ATTR_RO(_name)                                  \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
#define NVM_DEV_ATTR_12_RO(_name)                                       \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
#define NVM_DEV_ATTR_20_RO(_name)                                       \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)

/* general attributes */
static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(capabilities);

static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);

/* 1.2 values */
static NVM_DEV_ATTR_12_RO(vendor_opcode);
static NVM_DEV_ATTR_12_RO(device_mode);
static NVM_DEV_ATTR_12_RO(ppa_format);
static NVM_DEV_ATTR_12_RO(media_manager);
static NVM_DEV_ATTR_12_RO(media_type);
static NVM_DEV_ATTR_12_RO(flash_media_type);
static NVM_DEV_ATTR_12_RO(num_channels);
static NVM_DEV_ATTR_12_RO(num_luns);
static NVM_DEV_ATTR_12_RO(num_planes);
static NVM_DEV_ATTR_12_RO(num_blocks);
static NVM_DEV_ATTR_12_RO(num_pages);
static NVM_DEV_ATTR_12_RO(page_size);
static NVM_DEV_ATTR_12_RO(hw_sector_size);
static NVM_DEV_ATTR_12_RO(oob_sector_size);
static NVM_DEV_ATTR_12_RO(prog_typ);
static NVM_DEV_ATTR_12_RO(prog_max);
static NVM_DEV_ATTR_12_RO(erase_typ);
static NVM_DEV_ATTR_12_RO(erase_max);
static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);

/* 2.0 values */
static NVM_DEV_ATTR_20_RO(groups);
static NVM_DEV_ATTR_20_RO(punits);
static NVM_DEV_ATTR_20_RO(chunks);
static NVM_DEV_ATTR_20_RO(clba);
static NVM_DEV_ATTR_20_RO(ws_min);
static NVM_DEV_ATTR_20_RO(ws_opt);
static NVM_DEV_ATTR_20_RO(maxoc);
static NVM_DEV_ATTR_20_RO(maxocpu);
static NVM_DEV_ATTR_20_RO(mw_cunits);
static NVM_DEV_ATTR_20_RO(write_typ);
static NVM_DEV_ATTR_20_RO(write_max);
static NVM_DEV_ATTR_20_RO(reset_typ);
static NVM_DEV_ATTR_20_RO(reset_max);

static struct attribute *nvm_dev_attrs[] = {
        /* version agnostic attrs */
        &dev_attr_version.attr,
        &dev_attr_capabilities.attr,
        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,

        /* 1.2 attrs */
        &dev_attr_vendor_opcode.attr,
        &dev_attr_device_mode.attr,
        &dev_attr_media_manager.attr,
        &dev_attr_ppa_format.attr,
        &dev_attr_media_type.attr,
        &dev_attr_flash_media_type.attr,
        &dev_attr_num_channels.attr,
        &dev_attr_num_luns.attr,
        &dev_attr_num_planes.attr,
        &dev_attr_num_blocks.attr,
        &dev_attr_num_pages.attr,
        &dev_attr_page_size.attr,
        &dev_attr_hw_sector_size.attr,
        &dev_attr_oob_sector_size.attr,
        &dev_attr_prog_typ.attr,
        &dev_attr_prog_max.attr,
        &dev_attr_erase_typ.attr,
        &dev_attr_erase_max.attr,
        &dev_attr_multiplane_modes.attr,
        &dev_attr_media_capabilities.attr,
        &dev_attr_max_phys_secs.attr,

        /* 2.0 attrs */
        &dev_attr_groups.attr,
        &dev_attr_punits.attr,
        &dev_attr_chunks.attr,
        &dev_attr_clba.attr,
        &dev_attr_ws_min.attr,
        &dev_attr_ws_opt.attr,
        &dev_attr_maxoc.attr,
        &dev_attr_maxocpu.attr,
        &dev_attr_mw_cunits.attr,

        &dev_attr_write_typ.attr,
        &dev_attr_write_max.attr,
        &dev_attr_reset_typ.attr,
        &dev_attr_reset_max.attr,

        NULL,
};

static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
                                     struct attribute *attr, int index)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct nvme_ns *ns = disk->private_data;
        struct nvm_dev *ndev = ns->ndev;
        struct device_attribute *dev_attr =
                container_of(attr, typeof(*dev_attr), attr);

        if (!ndev)
                return 0;

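        /*
         * Generic attributes are always visible; 1.2- and 2.0-specific
         * attributes are only exposed for devices reporting that revision.
         */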
        if (dev_attr->show == nvm_dev_attr_show)
                return attr->mode;

        switch (ndev->geo.major_ver_id) {
        case 1:
                if (dev_attr->show == nvm_dev_attr_show_12)
                        return attr->mode;
                break;
        case 2:
                if (dev_attr->show == nvm_dev_attr_show_20)
                        return attr->mode;
                break;
        }

        return 0;
}

const struct attribute_group nvme_nvm_attr_group = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs,
        .is_visible     = nvm_dev_attrs_visible,
};