nvmet: clean up command parsing a bit
linux-2.6-block.git: drivers/nvme/target/admin-cmd.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

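/*
 * Get Log Page carries its transfer length as a 0's based dword count split
 * across NUMDU/NUMDL, so the helper below reassembles it and converts it to
 * bytes.  For example (illustrative only): NUMDU = 0, NUMDL = 0x3ff gives
 * 0x400 dwords, i.e. a 4096 byte log transfer.
 */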
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

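/*
 * The persistent error log lives in ctrl->slots[], a small ring buffer
 * driven by err_counter.  The handler below starts at the slot selected by
 * the current counter value and walks backwards through all
 * NVMET_ERROR_LOG_SLOTS entries, stopping early if a copy to the host SGL
 * fails.
 */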
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

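/*
 * The SMART counters below are derived from the block layer partition
 * statistics: host_reads/host_writes come straight from the I/O counters,
 * while data_units_read/data_units_written follow the NVMe convention of
 * thousands of 512-byte units, hence the DIV_ROUND_UP(sectors, 1000).
 * File-backed namespaces have no bdev statistics and are skipped.
 */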
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

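/*
 * The Commands Supported and Effects log advertises exactly the admin and
 * I/O opcodes this target implements.  Each entry only sets bit 0
 * ("command supported"); no further command effects are reported.
 */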
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

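/*
 * The ANA log is a header followed by one descriptor per enabled ANA
 * group.  nvmet_format_ana_group() fills in a single descriptor; when the
 * host sets the RGO ("return groups only") bit in LSP, the per-namespace
 * NSID list is omitted and only the group entries are returned.
 */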
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

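/*
 * Common Get Log Page entry point: the expected transfer length is derived
 * from NUMDL/NUMDU and validated first, then the request is dispatched on
 * the log identifier.  Unsupported LIDs fail with Invalid Field in Command.
 */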
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_err("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

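/*
 * Identify Controller: build the 4KB controller data structure from the
 * subsystem and port configuration.  A note on the queue entry size fields
 * below: SQES/CQES encode the minimum entry size in the low nibble and the
 * maximum in the high nibble, each as a power of two.  So (0x6 << 4) | 0x6
 * advertises fixed 64-byte submission queue entries and (0x4 << 4) | 0x4
 * fixed 16-byte completion queue entries.
 */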
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * We don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (ns->bdev)
		nvmet_bdev_set_limits(ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

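/*
 * Namespace Identification Descriptor list (Identify CNS 03h): each
 * descriptor is a 4-byte header (type, length, two reserved bytes)
 * followed by the identifier itself.  The helper below emits one
 * descriptor at *off and advances the offset; the caller zero-fills the
 * remainder of the 4KB buffer.
 */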
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_err("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately, reporting that the command to
 * abort wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

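/*
 * The Keep Alive Timeout (KATO) arrives in cdw11 in milliseconds; the
 * target stores it in seconds, rounded up.  Get Features converts it back
 * to milliseconds (see nvmet_get_feat_kato() below).
 */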
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_data_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_data_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_data_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_data_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

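/*
 * Admin command parsing: fabrics and discovery commands are handed off to
 * their own parsers, the controller state is validated, and every supported
 * opcode simply has its handler stored in req->execute.  Each handler
 * validates its own expected data length via nvmet_check_data_len() before
 * doing any work.  Unknown opcodes fail with Invalid Command Opcode.
 */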
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}