1 // SPDX-License-Identifier: GPL-2.0
3 * NVMe admin command implementation.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/rculist.h>
9 #include <linux/part_stat.h>
11 #include <generated/utsrelease.h>
12 #include <linux/unaligned.h>
15 static void nvmet_execute_delete_sq(struct nvmet_req *req)
17 struct nvmet_ctrl *ctrl = req->sq->ctrl;
18 u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
21 if (!nvmet_is_pci_ctrl(ctrl)) {
22 status = nvmet_report_invalid_opcode(req);
27 status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
31 status = nvmet_check_sqid(ctrl, sqid, false);
32 if (status != NVME_SC_SUCCESS)
35 status = ctrl->ops->delete_sq(ctrl, sqid);
38 nvmet_req_complete(req, status);
41 static void nvmet_execute_create_sq(struct nvmet_req *req)
43 struct nvmet_ctrl *ctrl = req->sq->ctrl;
44 struct nvme_command *cmd = req->cmd;
45 u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
46 u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
47 u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
48 u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
49 u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
52 if (!nvmet_is_pci_ctrl(ctrl)) {
53 status = nvmet_report_invalid_opcode(req);
58 status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
62 status = nvmet_check_sqid(ctrl, sqid, true);
63 if (status != NVME_SC_SUCCESS)
66 status = nvmet_check_io_cqid(ctrl, cqid, false);
67 if (status != NVME_SC_SUCCESS) {
68 pr_err("SQ %u: Invalid CQID %u\n", sqid, cqid);
72 if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
73 status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
77 status = ctrl->ops->create_sq(ctrl, sqid, cqid, sq_flags, qsize, prp1);
80 nvmet_req_complete(req, status);
83 static void nvmet_execute_delete_cq(struct nvmet_req *req)
85 struct nvmet_ctrl *ctrl = req->sq->ctrl;
86 u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
89 if (!nvmet_is_pci_ctrl(ctrl)) {
90 status = nvmet_report_invalid_opcode(req);
94 status = nvmet_check_io_cqid(ctrl, cqid, false);
95 if (status != NVME_SC_SUCCESS)
98 if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) {
99 /* The CQ does not exist or some SQs are still using it */
100 status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
104 status = ctrl->ops->delete_cq(ctrl, cqid);
107 nvmet_req_complete(req, status);
110 static void nvmet_execute_create_cq(struct nvmet_req *req)
112 struct nvmet_ctrl *ctrl = req->sq->ctrl;
113 struct nvme_command *cmd = req->cmd;
114 u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
115 u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
116 u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
117 u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
118 u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
121 if (!nvmet_is_pci_ctrl(ctrl)) {
122 status = nvmet_report_invalid_opcode(req);
126 status = nvmet_check_io_cqid(ctrl, cqid, true);
127 if (status != NVME_SC_SUCCESS)
130 if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
131 status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
135 status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
139 nvmet_req_complete(req, status);
142 u32 nvmet_get_log_page_len(struct nvme_command *cmd)
144 u32 len = le16_to_cpu(cmd->get_log_page.numdu);
147 len += le16_to_cpu(cmd->get_log_page.numdl);
148 /* NUMD is a 0's based value */
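/*
 * NUMD spans two fields: NUMD = (NUMDU << 16) | NUMDL, counted in dwords.
 * The byte length is therefore (NUMD + 1) * 4; e.g. NUMDU = 0, NUMDL = 0x3ff
 * gives (0x3ff + 1) * 4 = 4096 bytes.
 */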
155 static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
157 switch (cdw10 & 0xff) {
158 case NVME_FEAT_HOST_ID:
159 return sizeof(req->sq->ctrl->hostid);
165 u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
167 return le64_to_cpu(cmd->get_log_page.lpo);
170 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
172 nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
175 static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
177 struct nvmet_ctrl *ctrl = req->sq->ctrl;
183 spin_lock_irqsave(&ctrl->error_lock, flags);
184 slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
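/*
 * err_counter only ever grows, so this slot holds the most recently
 * recorded error. The loop below walks backwards (wrapping at slot 0),
 * so the log page lists entries newest first.
 */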
186 for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
187 if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
188 sizeof(struct nvme_error_slot)))
192 slot = NVMET_ERROR_LOG_SLOTS - 1;
195 offset += sizeof(struct nvme_error_slot);
197 spin_unlock_irqrestore(&ctrl->error_lock, flags);
198 nvmet_req_complete(req, 0);
201 static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
203 struct nvme_supported_log *logs;
206 logs = kzalloc(sizeof(*logs), GFP_KERNEL);
208 status = NVME_SC_INTERNAL;
212 logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
213 logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
214 logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
215 logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
216 logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
217 logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
218 logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
219 logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
220 logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
221 logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
222 logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);
224 status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
227 nvmet_req_complete(req, status);
230 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
231 struct nvme_smart_log *slog)
233 u64 host_reads, host_writes, data_units_read, data_units_written;
236 status = nvmet_req_find_ns(req);
240 /* we don't have the right data for file backed ns */
242 return NVME_SC_SUCCESS;
244 host_reads = part_stat_read(req->ns->bdev, ios[READ]);
246 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
247 host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
249 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
251 put_unaligned_le64(host_reads, &slog->host_reads[0]);
252 put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
253 put_unaligned_le64(host_writes, &slog->host_writes[0]);
254 put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
256 return NVME_SC_SUCCESS;
259 static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
260 struct nvme_smart_log *slog)
262 u64 host_reads = 0, host_writes = 0;
263 u64 data_units_read = 0, data_units_written = 0;
265 struct nvmet_ctrl *ctrl;
268 ctrl = req->sq->ctrl;
269 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
270 /* we don't have the right data for file backed ns */
273 host_reads += part_stat_read(ns->bdev, ios[READ]);
274 data_units_read += DIV_ROUND_UP(
275 part_stat_read(ns->bdev, sectors[READ]), 1000);
276 host_writes += part_stat_read(ns->bdev, ios[WRITE]);
277 data_units_written += DIV_ROUND_UP(
278 part_stat_read(ns->bdev, sectors[WRITE]), 1000);
281 put_unaligned_le64(host_reads, &slog->host_reads[0]);
282 put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
283 put_unaligned_le64(host_writes, &slog->host_writes[0]);
284 put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
286 return NVME_SC_SUCCESS;
289 static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
291 struct nvme_rotational_media_log *log;
292 struct gendisk *disk;
295 req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
296 req->cmd->get_log_page.lsi));
297 status = nvmet_req_find_ns(req);
301 if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
302 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
306 if (req->transfer_len != sizeof(*log)) {
307 status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
311 log = kzalloc(sizeof(*log), GFP_KERNEL);
315 log->endgid = req->cmd->get_log_page.lsi;
316 disk = req->ns->bdev->bd_disk;
317 if (disk && disk->ia_ranges)
318 log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
320 log->numa = cpu_to_le16(1);
322 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
325 nvmet_req_complete(req, status);
328 static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
330 struct nvme_smart_log *log;
331 u16 status = NVME_SC_INTERNAL;
334 if (req->transfer_len != sizeof(*log))
337 log = kzalloc(sizeof(*log), GFP_KERNEL);
341 if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
342 status = nvmet_get_smart_log_all(req, log);
344 status = nvmet_get_smart_log_nsid(req, log);
348 spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
349 put_unaligned_le64(req->sq->ctrl->err_counter,
350 &log->num_err_log_entries);
351 spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
353 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
357 nvmet_req_complete(req, status);
360 static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
361 struct nvme_effects_log *log)
363 /* For a PCI target controller, advertise support for the SQ/CQ creation and deletion commands. */
364 if (nvmet_is_pci_ctrl(ctrl)) {
365 log->acs[nvme_admin_delete_sq] =
366 log->acs[nvme_admin_create_sq] =
367 log->acs[nvme_admin_delete_cq] =
368 log->acs[nvme_admin_create_cq] =
369 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
372 log->acs[nvme_admin_get_log_page] =
373 log->acs[nvme_admin_identify] =
374 log->acs[nvme_admin_abort_cmd] =
375 log->acs[nvme_admin_set_features] =
376 log->acs[nvme_admin_get_features] =
377 log->acs[nvme_admin_async_event] =
378 log->acs[nvme_admin_keep_alive] =
379 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
382 static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
384 log->iocs[nvme_cmd_read] =
385 log->iocs[nvme_cmd_flush] =
386 log->iocs[nvme_cmd_dsm] =
387 log->iocs[nvme_cmd_resv_acquire] =
388 log->iocs[nvme_cmd_resv_register] =
389 log->iocs[nvme_cmd_resv_release] =
390 log->iocs[nvme_cmd_resv_report] =
391 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
392 log->iocs[nvme_cmd_write] =
393 log->iocs[nvme_cmd_write_zeroes] =
394 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
397 static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
399 log->iocs[nvme_cmd_zone_append] =
400 log->iocs[nvme_cmd_zone_mgmt_send] =
401 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
402 log->iocs[nvme_cmd_zone_mgmt_recv] =
403 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
406 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
408 struct nvmet_ctrl *ctrl = req->sq->ctrl;
409 struct nvme_effects_log *log;
410 u16 status = NVME_SC_SUCCESS;
412 log = kzalloc(sizeof(*log), GFP_KERNEL);
414 status = NVME_SC_INTERNAL;
418 switch (req->cmd->get_log_page.csi) {
420 nvmet_get_cmd_effects_admin(ctrl, log);
421 nvmet_get_cmd_effects_nvm(log);
424 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
425 status = NVME_SC_INVALID_IO_CMD_SET;
428 nvmet_get_cmd_effects_admin(ctrl, log);
429 nvmet_get_cmd_effects_nvm(log);
430 nvmet_get_cmd_effects_zns(log);
433 status = NVME_SC_INVALID_LOG_PAGE;
437 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
441 nvmet_req_complete(req, status);
444 static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
446 struct nvmet_ctrl *ctrl = req->sq->ctrl;
447 u16 status = NVME_SC_INTERNAL;
450 if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
453 mutex_lock(&ctrl->lock);
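/*
 * When more namespaces change than fit in changed_ns_list, the core sets
 * nr_changed_ns to U32_MAX and reports a single entry of NVME_NSID_ALL,
 * which is what the one-dword case below returns.
 */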
454 if (ctrl->nr_changed_ns == U32_MAX)
455 len = sizeof(__le32);
457 len = ctrl->nr_changed_ns * sizeof(__le32);
458 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
460 status = nvmet_zero_sgl(req, len, req->transfer_len - len);
461 ctrl->nr_changed_ns = 0;
462 nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
463 mutex_unlock(&ctrl->lock);
465 nvmet_req_complete(req, status);
468 static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
469 struct nvme_ana_group_desc *desc)
471 struct nvmet_ctrl *ctrl = req->sq->ctrl;
476 if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
477 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
478 if (ns->anagrpid == grpid)
479 desc->nsids[count++] = cpu_to_le32(ns->nsid);
483 desc->grpid = cpu_to_le32(grpid);
484 desc->nnsids = cpu_to_le32(count);
485 desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
486 desc->state = req->port->ana_state[grpid];
487 memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
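/*
 * The descriptor length is the fixed header plus one 32-bit NSID per
 * namespace: struct_size() expands to sizeof(*desc) + count * sizeof(__le32).
 */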
488 return struct_size(desc, nsids, count);
491 static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
493 u64 host_reads, host_writes, data_units_read, data_units_written;
494 struct nvme_endurance_group_log *log;
498 * The target driver emulates each endurance group as its own
499 * namespace, reusing the nsid as the endurance group identifier.
501 req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
502 req->cmd->get_log_page.lsi));
503 status = nvmet_req_find_ns(req);
507 log = kzalloc(sizeof(*log), GFP_KERNEL);
509 status = NVME_SC_INTERNAL;
516 host_reads = part_stat_read(req->ns->bdev, ios[READ]);
518 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
519 host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
521 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
523 put_unaligned_le64(host_reads, &log->hrc[0]);
524 put_unaligned_le64(data_units_read, &log->dur[0]);
525 put_unaligned_le64(host_writes, &log->hwc[0]);
526 put_unaligned_le64(data_units_written, &log->duw[0]);
528 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
531 nvmet_req_complete(req, status);
534 static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
536 struct nvme_ana_rsp_hdr hdr = { 0, };
537 struct nvme_ana_group_desc *desc;
538 size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
544 status = NVME_SC_INTERNAL;
545 desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
550 down_read(&nvmet_ana_sem);
551 for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
552 if (!nvmet_ana_group_enabled[grpid])
554 len = nvmet_format_ana_group(req, grpid, desc);
555 status = nvmet_copy_to_sgl(req, offset, desc, len);
561 for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
562 if (nvmet_ana_group_enabled[grpid])
566 hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
567 hdr.ngrps = cpu_to_le16(ngrps);
568 nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
569 up_read(&nvmet_ana_sem);
573 /* copy the header last once we know the number of groups */
574 status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
576 nvmet_req_complete(req, status);
579 static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
581 struct nvme_supported_features_log *features;
584 features = kzalloc(sizeof(*features), GFP_KERNEL);
586 status = NVME_SC_INTERNAL;
590 features->fis[NVME_FEAT_NUM_QUEUES] =
591 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
592 features->fis[NVME_FEAT_KATO] =
593 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
594 features->fis[NVME_FEAT_ASYNC_EVENT] =
595 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
596 features->fis[NVME_FEAT_HOST_ID] =
597 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
598 features->fis[NVME_FEAT_WRITE_PROTECT] =
599 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
600 features->fis[NVME_FEAT_RESV_MASK] =
601 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
603 status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
606 nvmet_req_complete(req, status);
609 static void nvmet_execute_get_log_page(struct nvmet_req *req)
611 if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
614 switch (req->cmd->get_log_page.lid) {
615 case NVME_LOG_SUPPORTED:
616 return nvmet_execute_get_supported_log_pages(req);
618 return nvmet_execute_get_log_page_error(req);
620 return nvmet_execute_get_log_page_smart(req);
621 case NVME_LOG_FW_SLOT:
623 * We only support a single firmware slot, which is always
624 * active, so we can zero out the whole firmware slot log and
625 * still claim to fully implement this mandatory log page.
627 return nvmet_execute_get_log_page_noop(req);
628 case NVME_LOG_CHANGED_NS:
629 return nvmet_execute_get_log_changed_ns(req);
630 case NVME_LOG_CMD_EFFECTS:
631 return nvmet_execute_get_log_cmd_effects_ns(req);
632 case NVME_LOG_ENDURANCE_GROUP:
633 return nvmet_execute_get_log_page_endgrp(req);
635 return nvmet_execute_get_log_page_ana(req);
636 case NVME_LOG_FEATURES:
637 return nvmet_execute_get_log_page_features(req);
639 return nvmet_execute_get_log_page_rmi(req);
640 case NVME_LOG_RESERVATION:
641 return nvmet_execute_get_log_page_resv(req);
643 pr_debug("unhandled lid %d on qid %d\n",
644 req->cmd->get_log_page.lid, req->sq->qid);
645 req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
646 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
649 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
651 struct nvmet_ctrl *ctrl = req->sq->ctrl;
652 struct nvmet_subsys *subsys = ctrl->subsys;
653 struct nvme_id_ctrl *id;
654 u32 cmd_capsule_size, ctratt;
657 if (!subsys->subsys_discovered) {
658 mutex_lock(&subsys->lock);
659 subsys->subsys_discovered = true;
660 mutex_unlock(&subsys->lock);
663 id = kzalloc(sizeof(*id), GFP_KERNEL);
665 status = NVME_SC_INTERNAL;
669 id->vid = cpu_to_le16(subsys->vendor_id);
670 id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);
672 memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
673 memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
674 strlen(subsys->model_number), ' ');
675 memcpy_and_pad(id->fr, sizeof(id->fr),
676 subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');
678 put_unaligned_le24(subsys->ieee_oui, id->ieee);
682 if (nvmet_is_disc_subsys(ctrl->subsys))
683 id->cntrltype = NVME_CTRL_DISC;
685 id->cntrltype = NVME_CTRL_IO;
687 /* we support multiple ports, multiple hosts and ANA: */
688 id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
691 /* Limit MDTS according to transport capability */
692 if (ctrl->ops->get_mdts)
693 id->mdts = ctrl->ops->get_mdts(ctrl);
697 id->cntlid = cpu_to_le16(ctrl->cntlid);
698 id->ver = cpu_to_le32(ctrl->subsys->ver);
700 /* XXX: figure out what to do about RTD3R/RTD3 */
701 id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
702 ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS;
703 if (nvmet_is_pci_ctrl(ctrl))
704 ctratt |= NVME_CTRL_ATTR_RHII;
705 id->ctratt = cpu_to_le32(ctratt);
710 * We don't really have a practical limit on the number of abort
711 * commands. But we don't do anything useful for abort either, so
712 * no point in allowing more abort commands than the spec requires.
716 id->aerl = NVMET_ASYNC_EVENTS - 1;
718 /* first slot is read-only, only one slot supported */
719 id->frmw = (1 << 0) | (1 << 1);
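/*
 * LPA: bit 0 = per-namespace SMART/Health log, bit 1 = Command Effects
 * log, bit 2 = extended data (LPO/long NUMD) for Get Log Page.
 */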
720 id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
721 id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
724 /* We support keep-alive timeout in granularity of seconds */
725 id->kas = cpu_to_le16(NVMET_KAS);
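/*
 * SQES/CQES encode the maximum (upper nibble) and required (lower nibble)
 * entry sizes as powers of two: 0x6 -> 64-byte SQEs, 0x4 -> 16-byte CQEs.
 */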
727 id->sqes = (0x6 << 4) | 0x6;
728 id->cqes = (0x4 << 4) | 0x4;
730 /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
731 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
733 id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
734 id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
735 id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
736 NVME_CTRL_ONCS_WRITE_ZEROES |
737 NVME_CTRL_ONCS_RESERVATIONS);
739 /* XXX: don't report vwc if the underlying device is write through */
740 id->vwc = NVME_CTRL_VWC_PRESENT;
743 * We can't support atomic writes bigger than a LBA without support
744 * from the backend device.
749 /* we always support SGLs */
750 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED);
751 if (ctrl->ops->flags & NVMF_KEYED_SGLS)
752 id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS);
753 if (req->port->inline_data_size)
754 id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS);
756 strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
759 * Max command capsule size is sqe + in-capsule data size.
760 * Disable in-capsule data for Metadata capable controllers.
762 cmd_capsule_size = sizeof(struct nvme_command);
763 if (!ctrl->pi_support)
764 cmd_capsule_size += req->port->inline_data_size;
765 id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
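/*
 * IOCCSZ is in 16-byte units; a 64-byte SQE alone gives 64 / 16 = 4, and
 * with, say, 8 KiB of inline data (an example value) it would be
 * (64 + 8192) / 16 = 516.
 */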
767 /* Max response capsule size is cqe */
768 id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
770 id->msdbd = ctrl->ops->msdbd;
773 * Endurance group identifier is 16 bits, so we can't let namespaces
774 * overflow that since we reuse the nsid
776 BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
777 id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);
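/*
 * ANACAP bits 0-4: the controller can report the optimized, non-optimized,
 * inaccessible, persistent loss and change ANA states respectively.
 */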
779 id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
780 id->anatt = 10; /* random value */
781 id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
782 id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
785 * Meh, we don't really support any power state. Fake up the same
786 * values that qemu does.
788 id->psd[0].max_power = cpu_to_le16(0x9c4);
789 id->psd[0].entry_lat = cpu_to_le32(0x10);
790 id->psd[0].exit_lat = cpu_to_le32(0x4);
792 id->nwpc = 1 << 0; /* write protect and no write protect */
794 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
798 nvmet_req_complete(req, status);
801 static void nvmet_execute_identify_ns(struct nvmet_req *req)
803 struct nvme_id_ns *id;
806 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
807 req->error_loc = offsetof(struct nvme_identify, nsid);
808 status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
812 id = kzalloc(sizeof(*id), GFP_KERNEL);
814 status = NVME_SC_INTERNAL;
818 /* return an all zeroed buffer if we can't find an active namespace */
819 status = nvmet_req_find_ns(req);
825 if (nvmet_ns_revalidate(req->ns)) {
826 mutex_lock(&req->ns->subsys->lock);
827 nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
828 mutex_unlock(&req->ns->subsys->lock);
832 * nuse = ncap = nsze isn't always true, but we have no way to find
833 * that out from the underlying device.
835 id->ncap = id->nsze =
836 cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
837 switch (req->port->ana_state[req->ns->anagrpid]) {
838 case NVME_ANA_INACCESSIBLE:
839 case NVME_ANA_PERSISTENT_LOSS:
847 nvmet_bdev_set_limits(req->ns->bdev, id);
850 * We just provide a single LBA format that matches what the
851 * underlying device reports.
857 * Our namespace might always be shared. Not just with other
858 * controllers, but also with any other user of the block device.
860 id->nmic = NVME_NS_NMIC_SHARED;
861 id->anagrpid = cpu_to_le32(req->ns->anagrpid);
863 if (req->ns->pr.enable)
864 id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
865 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
866 NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
867 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
868 NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
869 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
870 NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;
873 * Since we don't know any better, every namespace is its own endurance
876 id->endgid = cpu_to_le16(req->ns->nsid);
878 memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
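/* LBADS is the log2 of the LBA size, e.g. 9 for 512-byte and 12 for 4K blocks. */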
880 id->lbaf[0].ds = req->ns->blksize_shift;
882 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
883 id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
884 NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
885 NVME_NS_DPC_PI_TYPE3;
886 id->mc = NVME_MC_EXTENDED_LBA;
887 id->dps = req->ns->pi_type;
888 id->flbas = NVME_NS_FLBAS_META_EXT;
889 id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
892 if (req->ns->readonly)
893 id->nsattr |= NVME_NS_ATTR_RO;
896 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
900 nvmet_req_complete(req, status);
903 static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
905 u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
906 static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
907 struct nvmet_ctrl *ctrl = req->sq->ctrl;
914 list = kzalloc(buf_size, GFP_KERNEL);
916 status = NVME_SC_INTERNAL;
920 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
921 if (ns->nsid <= min_endgid)
924 list[i++] = cpu_to_le16(ns->nsid);
925 if (i == buf_size / sizeof(__le16))
929 list[0] = cpu_to_le16(i - 1); /* entry 0 holds the number of identifiers that follow */
930 status = nvmet_copy_to_sgl(req, 0, list, buf_size);
933 nvmet_req_complete(req, status);
936 static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
938 static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
939 struct nvmet_ctrl *ctrl = req->sq->ctrl;
942 u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
948 * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid
949 * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
951 if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
952 req->error_loc = offsetof(struct nvme_identify, nsid);
953 status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
957 list = kzalloc(buf_size, GFP_KERNEL);
959 status = NVME_SC_INTERNAL;
963 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
964 if (ns->nsid <= min_nsid)
966 if (match_css && ns->csi != req->cmd->identify.csi)
968 list[i++] = cpu_to_le32(ns->nsid);
969 if (i == buf_size / sizeof(__le32))
973 status = nvmet_copy_to_sgl(req, 0, list, buf_size);
977 nvmet_req_complete(req, status);
980 static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
981 void *id, off_t *off)
983 struct nvme_ns_id_desc desc = {
989 status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
992 *off += sizeof(desc);
994 status = nvmet_copy_to_sgl(req, *off, id, len);
1002 static void nvmet_execute_identify_desclist(struct nvmet_req *req)
1007 status = nvmet_req_find_ns(req);
1011 if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
1012 status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
1014 &req->ns->uuid, &off);
1018 if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
1019 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
1020 NVME_NIDT_NGUID_LEN,
1021 &req->ns->nguid, &off);
1026 status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
1028 &req->ns->csi, &off);
1032 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
1033 off) != NVME_IDENTIFY_DATA_SIZE - off)
1034 status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
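/*
 * The zero fill above also terminates the descriptor list, since a
 * descriptor with NIDT and NIDL both zero marks the end.
 */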
1037 nvmet_req_complete(req, status);
1040 static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
1042 /* Not supported: return zeroes */
1043 nvmet_req_complete(req,
1044 nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
1047 static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
1050 struct nvme_id_ns_nvm *id;
1052 status = nvmet_req_find_ns(req);
1056 id = kzalloc(sizeof(*id), GFP_KERNEL);
1058 status = NVME_SC_INTERNAL;
1061 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
1064 nvmet_req_complete(req, status);
1067 static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
1069 struct nvme_id_ns_cs_indep *id;
1072 status = nvmet_req_find_ns(req);
1076 id = kzalloc(sizeof(*id), GFP_KERNEL);
1078 status = NVME_SC_INTERNAL;
1082 id->nstat = NVME_NSTAT_NRDY;
1083 id->anagrpid = cpu_to_le32(req->ns->anagrpid);
1084 id->nmic = NVME_NS_NMIC_SHARED;
1085 if (req->ns->readonly)
1086 id->nsattr |= NVME_NS_ATTR_RO;
1087 if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
1088 id->nsfeat |= NVME_NS_ROTATIONAL;
1090 * We need the flush command to flush the file's metadata,
1091 * so report supporting vwc if the backend is a file, even
1092 * though buffered_io is disabled.
1094 if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
1095 id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;
1097 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
1100 nvmet_req_complete(req, status);
1103 static void nvmet_execute_identify(struct nvmet_req *req)
1105 if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
1108 switch (req->cmd->identify.cns) {
1109 case NVME_ID_CNS_NS:
1110 nvmet_execute_identify_ns(req);
1112 case NVME_ID_CNS_CTRL:
1113 nvmet_execute_identify_ctrl(req);
1115 case NVME_ID_CNS_NS_ACTIVE_LIST:
1116 nvmet_execute_identify_nslist(req, false);
1118 case NVME_ID_CNS_NS_DESC_LIST:
1119 nvmet_execute_identify_desclist(req);
1121 case NVME_ID_CNS_CS_NS:
1122 switch (req->cmd->identify.csi) {
1124 nvme_execute_identify_ns_nvm(req);
1127 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
1128 nvmet_execute_identify_ns_zns(req);
1134 case NVME_ID_CNS_CS_CTRL:
1135 switch (req->cmd->identify.csi) {
1137 nvmet_execute_identify_ctrl_nvm(req);
1140 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
1141 nvmet_execute_identify_ctrl_zns(req);
1147 case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
1148 nvmet_execute_identify_nslist(req, true);
1150 case NVME_ID_CNS_NS_CS_INDEP:
1151 nvmet_execute_id_cs_indep(req);
1153 case NVME_ID_CNS_ENDGRP_LIST:
1154 nvmet_execute_identify_endgrp_list(req);
1158 pr_debug("unhandled identify cns %d on qid %d\n",
1159 req->cmd->identify.cns, req->sq->qid);
1160 req->error_loc = offsetof(struct nvme_identify, cns);
1161 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
1165 * A "minimum viable" abort implementation: the command is mandatory in the
1166 * spec, but we are not required to do any useful work. We couldn't really
1167 * do a useful abort, so don't bother even with waiting for the command
1168 * to be executed and return immediately telling the command to abort
1171 static void nvmet_execute_abort(struct nvmet_req *req)
1173 if (!nvmet_check_transfer_len(req, 0))
1175 nvmet_set_result(req, 1);
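/* Result dword 0 bit 0 set to 1 tells the host the command was not aborted. */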
1176 nvmet_req_complete(req, 0);
1179 static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
1184 status = nvmet_file_flush(req);
1186 status = nvmet_bdev_flush(req);
1189 pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
1193 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
1195 u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
1196 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1199 status = nvmet_req_find_ns(req);
1203 mutex_lock(&subsys->lock);
1204 switch (write_protect) {
1205 case NVME_NS_WRITE_PROTECT:
1206 req->ns->readonly = true;
1207 status = nvmet_write_protect_flush_sync(req);
1209 req->ns->readonly = false;
1211 case NVME_NS_NO_WRITE_PROTECT:
1212 req->ns->readonly = false;
1220 nvmet_ns_changed(subsys, req->ns->nsid);
1221 mutex_unlock(&subsys->lock);
1225 u16 nvmet_set_feat_kato(struct nvmet_req *req)
1227 u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
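/*
 * KATO in cdw11 is in milliseconds while ctrl->kato is kept in seconds,
 * rounded up: e.g. a 15000 ms request becomes kato = 15.
 */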
1229 nvmet_stop_keep_alive_timer(req->sq->ctrl);
1230 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
1231 nvmet_start_keep_alive_timer(req->sq->ctrl);
1233 nvmet_set_result(req, req->sq->ctrl->kato);
1238 u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
1240 u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
1242 if (val32 & ~mask) {
1243 req->error_loc = offsetof(struct nvme_common_command, cdw11);
1244 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1247 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
1248 nvmet_set_result(req, val32);
1253 static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
1255 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1257 if (!nvmet_is_pci_ctrl(ctrl))
1258 return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
1261 * The NVMe Base Specification v2.1 recommends supporting 128-bit host
1262 * IDs (section 5.1.25.1.28.1). However, that same section also says
1263 * that "The controller may support a 64-bit Host Identifier and/or an
1264 * extended 128-bit Host Identifier". So simplify this support and do
1265 * not support 64-bit host IDs to avoid needing to check that all
1266 * controllers associated with the same subsystem use the same host
1269 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
1270 req->error_loc = offsetof(struct nvme_common_command, cdw11);
1271 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1274 return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
1275 sizeof(req->sq->ctrl->hostid));
1278 static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
1280 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1281 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1282 struct nvmet_feat_irq_coalesce irqc = {
1283 .time = (cdw11 >> 8) & 0xff,
1284 .thr = cdw11 & 0xff,
1288 * This feature is not supported for fabrics controllers and is mandatory
1289 * for PCI controllers.
1291 if (!nvmet_is_pci_ctrl(ctrl)) {
1292 req->error_loc = offsetof(struct nvme_common_command, cdw10);
1293 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1296 return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
1299 static u16 nvmet_set_feat_irq_config(struct nvmet_req *req)
1301 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1302 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1303 struct nvmet_feat_irq_config irqcfg = {
1304 .iv = cdw11 & 0xffff,
1305 .cd = (cdw11 >> 16) & 0x1,
1309 * This feature is not supported for fabrics controllers and is mandatory
1310 * for PCI controllers.
1312 if (!nvmet_is_pci_ctrl(ctrl)) {
1313 req->error_loc = offsetof(struct nvme_common_command, cdw10);
1314 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1317 return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
1320 static u16 nvmet_set_feat_arbitration(struct nvmet_req *req)
1322 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1323 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1324 struct nvmet_feat_arbitration arb = {
1325 .hpw = (cdw11 >> 24) & 0xff,
1326 .mpw = (cdw11 >> 16) & 0xff,
1327 .lpw = (cdw11 >> 8) & 0xff,
1331 if (!ctrl->ops->set_feature) {
1332 req->error_loc = offsetof(struct nvme_common_command, cdw10);
1333 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1336 return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
1339 void nvmet_execute_set_features(struct nvmet_req *req)
1341 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1342 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1343 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1348 if (!nvmet_check_data_len_lte(req, 0))
1351 switch (cdw10 & 0xff) {
1352 case NVME_FEAT_ARBITRATION:
1353 status = nvmet_set_feat_arbitration(req);
1355 case NVME_FEAT_NUM_QUEUES:
1356 ncqr = (cdw11 >> 16) & 0xffff;
1357 nsqr = cdw11 & 0xffff;
1358 if (ncqr == 0xffff || nsqr == 0xffff) {
1359 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1362 nvmet_set_result(req,
1363 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
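/*
 * NSQA/NCQA in the result are 0's based, so (max_qid - 1) in each 16-bit
 * half grants the host max_qid I/O submission and completion queues.
 */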
1365 case NVME_FEAT_IRQ_COALESCE:
1366 status = nvmet_set_feat_irq_coalesce(req);
1368 case NVME_FEAT_IRQ_CONFIG:
1369 status = nvmet_set_feat_irq_config(req);
1371 case NVME_FEAT_KATO:
1372 status = nvmet_set_feat_kato(req);
1374 case NVME_FEAT_ASYNC_EVENT:
1375 status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
1377 case NVME_FEAT_HOST_ID:
1378 status = nvmet_set_feat_host_id(req);
1380 case NVME_FEAT_WRITE_PROTECT:
1381 status = nvmet_set_feat_write_protect(req);
1383 case NVME_FEAT_RESV_MASK:
1384 status = nvmet_set_feat_resv_notif_mask(req, cdw11);
1387 req->error_loc = offsetof(struct nvme_common_command, cdw10);
1388 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1392 nvmet_req_complete(req, status);
1395 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
1397 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1400 result = nvmet_req_find_ns(req);
1404 mutex_lock(&subsys->lock);
1405 if (req->ns->readonly)
1406 result = NVME_NS_WRITE_PROTECT;
1408 result = NVME_NS_NO_WRITE_PROTECT;
1409 nvmet_set_result(req, result);
1410 mutex_unlock(&subsys->lock);
1415 static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
1417 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1418 struct nvmet_feat_irq_coalesce irqc = { };
1422 * This feature is not supported for fabrics controllers and is mandatory
1423 * for PCI controllers.
1425 if (!nvmet_is_pci_ctrl(ctrl)) {
1426 req->error_loc = offsetof(struct nvme_common_command, cdw10);
1427 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1430 status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
1431 if (status != NVME_SC_SUCCESS)
1434 nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);
1436 return NVME_SC_SUCCESS;
1439 static u16 nvmet_get_feat_irq_config(struct nvmet_req *req)
1441 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1442 u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
1443 struct nvmet_feat_irq_config irqcfg = { .iv = iv };
1447 * This feature is not supported for fabrics controllers and is mandatory
1448 * for PCI controllers.
1450 if (!nvmet_is_pci_ctrl(ctrl)) {
1451 req->error_loc = offsetof(struct nvme_common_command, cdw10);
1452 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1455 status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
1456 if (status != NVME_SC_SUCCESS)
1459 nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv);
1461 return NVME_SC_SUCCESS;
1464 static u16 nvmet_get_feat_arbitration(struct nvmet_req *req)
1466 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1467 struct nvmet_feat_arbitration arb = { };
1470 if (!ctrl->ops->get_feature) {
1471 req->error_loc = offsetof(struct nvme_common_command, cdw10);
1472 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1475 status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
1476 if (status != NVME_SC_SUCCESS)
1479 nvmet_set_result(req,
1480 ((u32)arb.hpw << 24) |
1481 ((u32)arb.mpw << 16) |
1482 ((u32)arb.lpw << 8) |
1485 return NVME_SC_SUCCESS;
1488 void nvmet_get_feat_kato(struct nvmet_req *req)
1490 nvmet_set_result(req, req->sq->ctrl->kato * 1000);
1493 void nvmet_get_feat_async_event(struct nvmet_req *req)
1495 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
1498 void nvmet_execute_get_features(struct nvmet_req *req)
1500 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1501 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1504 if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
1507 switch (cdw10 & 0xff) {
1509 * These features are mandatory in the spec, but we don't
1510 * have a useful way to implement them. We'll eventually
1511 * need to come up with some fake values for these.
1514 case NVME_FEAT_POWER_MGMT:
1516 case NVME_FEAT_TEMP_THRESH:
1518 case NVME_FEAT_ERR_RECOVERY:
1520 case NVME_FEAT_WRITE_ATOMIC:
1523 case NVME_FEAT_ARBITRATION:
1524 status = nvmet_get_feat_arbitration(req);
1526 case NVME_FEAT_IRQ_COALESCE:
1527 status = nvmet_get_feat_irq_coalesce(req);
1529 case NVME_FEAT_IRQ_CONFIG:
1530 status = nvmet_get_feat_irq_config(req);
1532 case NVME_FEAT_ASYNC_EVENT:
1533 nvmet_get_feat_async_event(req);
1535 case NVME_FEAT_VOLATILE_WC:
1536 nvmet_set_result(req, 1);
1538 case NVME_FEAT_NUM_QUEUES:
1539 nvmet_set_result(req,
1540 (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
1542 case NVME_FEAT_KATO:
1543 nvmet_get_feat_kato(req);
1545 case NVME_FEAT_HOST_ID:
1546 /* need 128-bit host identifier flag */
1547 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
1549 offsetof(struct nvme_common_command, cdw11);
1550 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1554 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
1555 sizeof(req->sq->ctrl->hostid));
1557 case NVME_FEAT_WRITE_PROTECT:
1558 status = nvmet_get_feat_write_protect(req);
1560 case NVME_FEAT_RESV_MASK:
1561 status = nvmet_get_feat_resv_notif_mask(req);
1565 offsetof(struct nvme_common_command, cdw10);
1566 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1570 nvmet_req_complete(req, status);
1573 void nvmet_execute_async_event(struct nvmet_req *req)
1575 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1577 if (!nvmet_check_transfer_len(req, 0))
1580 mutex_lock(&ctrl->lock);
1581 if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
1582 mutex_unlock(&ctrl->lock);
1583 nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
1586 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
1587 mutex_unlock(&ctrl->lock);
1589 queue_work(nvmet_wq, &ctrl->async_event_work);
1592 void nvmet_execute_keep_alive(struct nvmet_req *req)
1594 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1597 if (!nvmet_check_transfer_len(req, 0))
1601 status = NVME_SC_KA_TIMEOUT_INVALID;
1605 pr_debug("ctrl %d update keep-alive timer for %d secs\n",
1606 ctrl->cntlid, ctrl->kato);
1607 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1609 nvmet_req_complete(req, status);
1612 u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
1614 struct nvme_command *cmd = req->cmd;
1616 if (nvme_is_fabrics(cmd))
1617 return nvmet_fabrics_admin_cmd_data_len(req);
1618 if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1619 return nvmet_discovery_cmd_data_len(req);
1621 switch (cmd->common.opcode) {
1622 case nvme_admin_get_log_page:
1623 return nvmet_get_log_page_len(cmd);
1624 case nvme_admin_identify:
1625 return NVME_IDENTIFY_DATA_SIZE;
1626 case nvme_admin_get_features:
1627 return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
1633 u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
1635 struct nvme_command *cmd = req->cmd;
1638 if (nvme_is_fabrics(cmd))
1639 return nvmet_parse_fabrics_admin_cmd(req);
1640 if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1641 return nvmet_parse_discovery_cmd(req);
1643 ret = nvmet_check_ctrl_status(req);
1647 /* For PCI controllers, admin commands shall not use SGL. */
1648 if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
1649 cmd->common.flags & NVME_CMD_SGL_ALL)
1650 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1652 if (nvmet_is_passthru_req(req))
1653 return nvmet_parse_passthru_admin_cmd(req);
1655 switch (cmd->common.opcode) {
1656 case nvme_admin_delete_sq:
1657 req->execute = nvmet_execute_delete_sq;
1659 case nvme_admin_create_sq:
1660 req->execute = nvmet_execute_create_sq;
1662 case nvme_admin_get_log_page:
1663 req->execute = nvmet_execute_get_log_page;
1665 case nvme_admin_delete_cq:
1666 req->execute = nvmet_execute_delete_cq;
1668 case nvme_admin_create_cq:
1669 req->execute = nvmet_execute_create_cq;
1671 case nvme_admin_identify:
1672 req->execute = nvmet_execute_identify;
1674 case nvme_admin_abort_cmd:
1675 req->execute = nvmet_execute_abort;
1677 case nvme_admin_set_features:
1678 req->execute = nvmet_execute_set_features;
1680 case nvme_admin_get_features:
1681 req->execute = nvmet_execute_get_features;
1683 case nvme_admin_async_event:
1684 req->execute = nvmet_execute_async_event;
1686 case nvme_admin_keep_alive:
1687 req->execute = nvmet_execute_keep_alive;
1690 return nvmet_report_invalid_opcode(req);