// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

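/*
 * The Get Log Page transfer length is carried as a split dword count:
 * NUMDU holds the upper 16 bits and NUMDL the lower 16 bits of NUMD,
 * which is a 0's based count of dwords (hence the +1 and *4 below).
 */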
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
        switch (cdw10 & 0xff) {
        case NVME_FEAT_HOST_ID:
                return sizeof(req->sq->ctrl->hostid);
        default:
                return 0;
        }
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
        return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

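/*
 * The error log is kept as a ring of NVMET_ERROR_LOG_SLOTS entries.
 * Walk it backwards from the slot of the most recent error so that the
 * newest entries are returned first.
 */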
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        unsigned long flags;
        off_t offset = 0;
        u64 slot;
        u64 i;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

        for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
                if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot)))
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
                offset += sizeof(struct nvme_error_slot);
        }
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
        nvmet_req_complete(req, 0);
}

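/*
 * Fill the per-namespace SMART counters from the block layer partition
 * statistics.  Data units are reported in thousands of 512-byte sectors,
 * rounded up, matching the spec's definition of a data unit.
 */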
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads, host_writes, data_units_read, data_units_written;
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                return status;

        /* we don't have the right data for file backed ns */
        if (!req->ns->bdev)
                return NVME_SC_SUCCESS;

        host_reads = part_stat_read(req->ns->bdev, ios[READ]);
        data_units_read =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
        host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
        data_units_written =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

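/*
 * For the NSID_ALL (0xffffffff) case accumulate the counters over every
 * block device backed namespace in the subsystem; file backed namespaces
 * are skipped as above.
 */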
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;
        unsigned long idx;

        ctrl = req->sq->ctrl;
        xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev, ios[READ]);
                data_units_read += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[WRITE]), 1000);
        }

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;
        unsigned long flags;

        if (req->transfer_len != sizeof(*log))
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
                goto out_free_log;

        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                        &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
        log->acs[nvme_admin_get_log_page] =
        log->acs[nvme_admin_identify] =
        log->acs[nvme_admin_abort_cmd] =
        log->acs[nvme_admin_set_features] =
        log->acs[nvme_admin_get_features] =
        log->acs[nvme_admin_async_event] =
        log->acs[nvme_admin_keep_alive] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

        log->iocs[nvme_cmd_read] =
        log->iocs[nvme_cmd_flush] =
        log->iocs[nvme_cmd_dsm] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
        log->iocs[nvme_cmd_write] =
        log->iocs[nvme_cmd_write_zeroes] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
        log->iocs[nvme_cmd_zone_append] =
        log->iocs[nvme_cmd_zone_mgmt_send] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
        log->iocs[nvme_cmd_zone_mgmt_recv] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
        struct nvme_effects_log *log;
        u16 status = NVME_SC_SUCCESS;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        switch (req->cmd->get_log_page.csi) {
        case NVME_CSI_NVM:
                nvmet_get_cmd_effects_nvm(log);
                break;
        case NVME_CSI_ZNS:
                if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        status = NVME_SC_INVALID_IO_CMD_SET;
                        goto free;
                }
                nvmet_get_cmd_effects_nvm(log);
                nvmet_get_cmd_effects_zns(log);
                break;
        default:
                status = NVME_SC_INVALID_LOG_PAGE;
                goto free;
        }

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

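/*
 * The Changed Namespace List log reports the NSIDs recorded since the
 * log was last read.  On overflow (nr_changed_ns == U32_MAX) only a
 * single entry is returned to signal the overflow; the rest of the
 * buffer is zeroed either way, and reading the log clears the pending
 * namespace attribute AEN.
 */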
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_INTERNAL;
        size_t len;

        if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);
        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (!status)
                status = nvmet_zero_sgl(req, len, req->transfer_len - len);
        ctrl->nr_changed_ns = 0;
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        mutex_unlock(&ctrl->lock);
out:
        nvmet_req_complete(req, status);
}

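/*
 * Format a single ANA group descriptor.  If the host set the RGO bit
 * (Return Groups Only) the NSID list is omitted and only the group
 * header fields are filled in.
 */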
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
                struct nvme_ana_group_desc *desc)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        u32 count = 0;

        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
        return struct_size(desc, nsids, count);
}

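/*
 * The ANA log consists of a header followed by one variable sized
 * descriptor per enabled group.  The descriptors are copied out first;
 * the header is written last, once the final group count is known.
 */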
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
        struct nvme_ana_rsp_hdr hdr = { 0, };
        struct nvme_ana_group_desc *desc;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
        size_t len;
        u32 grpid;
        u16 ngrps = 0;
        u16 status;

        status = NVME_SC_INTERNAL;
        desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
                       GFP_KERNEL);
        if (!desc)
                goto out;

        down_read(&nvmet_ana_sem);
        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (!nvmet_ana_group_enabled[grpid])
                        continue;
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
                ngrps++;
        }
        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (nvmet_ana_group_enabled[grpid])
                        ngrps++;
        }

        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
        up_read(&nvmet_ana_sem);

        kfree(desc);

        /* copy the header last once we know the number of groups */
        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
                return;

        switch (req->cmd->get_log_page.lid) {
        case NVME_LOG_ERROR:
                return nvmet_execute_get_log_page_error(req);
        case NVME_LOG_SMART:
                return nvmet_execute_get_log_page_smart(req);
        case NVME_LOG_FW_SLOT:
                /*
                 * We only support a single firmware slot which is always
                 * active, so we can zero out the whole firmware slot log and
                 * still claim to fully implement this mandatory log page.
                 */
                return nvmet_execute_get_log_page_noop(req);
        case NVME_LOG_CHANGED_NS:
                return nvmet_execute_get_log_changed_ns(req);
        case NVME_LOG_CMD_EFFECTS:
                return nvmet_execute_get_log_cmd_effects_ns(req);
        case NVME_LOG_ANA:
                return nvmet_execute_get_log_page_ana(req);
        }
        pr_debug("unhandled lid %d on qid %d\n",
               req->cmd->get_log_page.lid, req->sq->qid);
        req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
        nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

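/*
 * Identify Controller: there is no real controller behind a fabrics
 * target, so everything here is synthesized.  Transport dependent
 * fields (MDTS, SGL support, capsule sizes) are filled in from the
 * transport ops and the port configuration.
 */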
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_subsys *subsys = ctrl->subsys;
        struct nvme_id_ctrl *id;
        u32 cmd_capsule_size;
        u16 status = 0;

        if (!subsys->subsys_discovered) {
                mutex_lock(&subsys->lock);
                subsys->subsys_discovered = true;
                mutex_unlock(&subsys->lock);
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
        memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
                       strlen(subsys->model_number), ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

        put_unaligned_le24(subsys->ieee_oui, id->ieee);

        id->rab = 6;

        if (nvmet_is_disc_subsys(ctrl->subsys))
                id->cntrltype = NVME_CTRL_DISC;
        else
                id->cntrltype = NVME_CTRL_IO;

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
                NVME_CTRL_CMIC_ANA;

        /* Limit MDTS according to transport capability */
        if (ctrl->ops->get_mdts)
                id->mdts = ctrl->ops->get_mdts(ctrl);
        else
                id->mdts = 0;

        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
                NVME_CTRL_ATTR_TBKAS);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->flags & NVMF_KEYED_SGLS)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /*
         * Max command capsule size is sqe + in-capsule data size.
         * Disable in-capsule data for Metadata capable controllers.
         */
        cmd_capsule_size = sizeof(struct nvme_command);
        if (!ctrl->pi_support)
                cmd_capsule_size += req->port->inline_data_size;
        id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvme_id_ns *id;
        u16 status;

        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* return an all zeroed buffer if we can't find an active namespace */
        status = nvmet_req_find_ns(req);
        if (status) {
                status = 0;
                goto done;
        }

        if (nvmet_ns_revalidate(req->ns)) {
                mutex_lock(&req->ns->subsys->lock);
                nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
                mutex_unlock(&req->ns->subsys->lock);
        }

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze =
                cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
        switch (req->port->ana_state[req->ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        if (req->ns->bdev)
                nvmet_bdev_set_limits(req->ns->bdev, id);

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = NVME_NS_NMIC_SHARED;
        id->anagrpid = cpu_to_le32(req->ns->anagrpid);

        memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = req->ns->blksize_shift;

        if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
                id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
                          NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
                          NVME_NS_DPC_PI_TYPE3;
                id->mc = NVME_MC_EXTENDED_LBA;
                id->dps = req->ns->pi_type;
                id->flbas = NVME_NS_FLBAS_META_EXT;
                id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
        }

        if (req->ns->readonly)
                id->nsattr |= NVME_NS_ATTR_RO;
done:
        if (!status)
                status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

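/*
 * Return the list of active NSIDs greater than the NSID in CDW1, in
 * ascending order, up to one Identify data buffer's worth of entries.
 * Unused entries remain zero from the kzalloc().
 */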
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}

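/*
 * Emit a single namespace identification descriptor: the NIDT/NIDL
 * header first, then the identifier payload, advancing *off past both.
 */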
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}

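/*
 * Build the Namespace Identification Descriptor list: UUID and NGUID
 * descriptors are only included when they are actually set, the CSI
 * descriptor is always present, and the rest of the buffer is zeroed.
 */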
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        off_t off = 0;
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                goto out;

        if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &req->ns->uuid, &off);
                if (status)
                        goto out;
        }
        if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &req->ns->nguid, &off);
                if (status)
                        goto out;
        }

        status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
                                          NVME_NIDT_CSI_LEN,
                                          &req->ns->csi, &off);
        if (status)
                goto out;

        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
        nvmet_req_complete(req, status);
}

static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
        switch (req->cmd->identify.csi) {
        case NVME_CSI_NVM:
                nvmet_execute_identify_desclist(req);
                return true;
        case NVME_CSI_ZNS:
                if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        nvmet_execute_identify_desclist(req);
                        return true;
                }
                return false;
        default:
                return false;
        }
}

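/*
 * Dispatch Identify based on the CNS value, further qualified by the
 * Command Set Identifier (CSI) where applicable.  Unsupported
 * combinations fall through to nvmet_req_cns_error_complete().
 */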
static void nvmet_execute_identify(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
                return;

        switch (req->cmd->identify.cns) {
        case NVME_ID_CNS_NS:
                nvmet_execute_identify_ns(req);
                return;
        case NVME_ID_CNS_CS_NS:
                if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        switch (req->cmd->identify.csi) {
                        case NVME_CSI_ZNS:
                                return nvmet_execute_identify_cns_cs_ns(req);
                        default:
                                break;
                        }
                }
                break;
        case NVME_ID_CNS_CTRL:
                switch (req->cmd->identify.csi) {
                case NVME_CSI_NVM:
                        return nvmet_execute_identify_ctrl(req);
                }
                break;
        case NVME_ID_CNS_CS_CTRL:
                if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        switch (req->cmd->identify.csi) {
                        case NVME_CSI_ZNS:
                                return nvmet_execute_identify_cns_cs_ctrl(req);
                        default:
                                break;
                        }
                }
                break;
        case NVME_ID_CNS_NS_ACTIVE_LIST:
                switch (req->cmd->identify.csi) {
                case NVME_CSI_NVM:
                        return nvmet_execute_identify_nslist(req);
                default:
                        break;
                }
                break;
        case NVME_ID_CNS_NS_DESC_LIST:
                if (nvmet_handle_identify_desclist(req))
                        return;
                break;
        }

        nvmet_req_cns_error_complete(req);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, indicating that the command to abort was
 * not found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, 0))
                return;
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
        u16 status;

        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
        return status;
}

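/*
 * Set Features / Write Protect: the namespace is marked read-only
 * before the flush so no new writes race with it; if the flush fails
 * the write protect state is rolled back.
 */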
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                return status;

        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
}

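/*
 * KATO is specified in milliseconds in CDW11 but stored in seconds,
 * rounded up.  The keep-alive timer is restarted so the new value takes
 * effect immediately, and the value actually applied is reported back
 * in the command result.
 */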
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        nvmet_stop_keep_alive_timer(req->sq->ctrl);
        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
        nvmet_start_keep_alive_timer(req->sq->ctrl);

        nvmet_set_result(req, req->sq->ctrl->kato);

        return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
        nvmet_set_result(req, val32);

        return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        u16 status = 0;
        u16 nsqr;
        u16 ncqr;

        if (!nvmet_check_data_len_lte(req, 0))
                return;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_NUM_QUEUES:
                ncqr = (cdw11 >> 16) & 0xffff;
                nsqr = cdw11 & 0xffff;
                if (ncqr == 0xffff || nsqr == 0xffff) {
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                status = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
                break;
        case NVME_FEAT_HOST_ID:
                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_set_feat_write_protect(req);
                break;
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u32 result;

        result = nvmet_req_find_ns(req);
        if (result)
                return result;

        mutex_lock(&subsys->lock);
        if (req->ns->readonly)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);

        return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
        nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
        nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
                return;

        switch (cdw10 & 0xff) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them.  We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
#endif
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                        req->error_loc =
                                offsetof(struct nvme_common_command, cdw11);
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                sizeof(req->sq->ctrl->hostid));
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_get_feat_write_protect(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        if (!ctrl->kato) {
                status = NVME_SC_KA_TIMEOUT_INVALID;
                goto out;
        }

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);
        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
        nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        if (nvme_is_fabrics(cmd))
                return nvmet_parse_fabrics_admin_cmd(req);
        if (unlikely(!nvmet_check_auth_status(req)))
                return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
        if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
                return nvmet_parse_discovery_cmd(req);

        ret = nvmet_check_ctrl_status(req);
        if (unlikely(ret))
                return ret;

        if (nvmet_is_passthru_req(req))
                return nvmet_parse_passthru_admin_cmd(req);

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->execute = nvmet_execute_get_log_page;
                return 0;
        case nvme_admin_identify:
                req->execute = nvmet_execute_identify;
                return 0;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                return 0;
        default:
                return nvmet_report_invalid_opcode(req);
        }
}