// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <linux/ratelimit.h>
#include <linux/unaligned.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

struct nvme_ns_info {
	struct nvme_ns_ids ids;
	u32 nsid;
	__le32 anagrpid;
	u8 pi_offset;
	u16 endgid;
	u64 runs;
	bool is_shared;
	bool is_readonly;
	bool is_ready;
	bool is_removed;
	bool is_rotational;
	bool no_vwc;
};

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

/*
 * Older kernels didn't enable protection information if it was at an offset.
 * Newer kernels do, so it breaks reads on the upgrade if such formats were
 * used in prior kernels since the metadata written did not contain a valid
 * checksum.
 */
static bool disable_pi_offsets = false;
module_param(disable_pi_offsets, bool, 0444);
MODULE_PARM_DESC(disable_pi_offsets,
		 "disable protection information if it has an offset");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive and periodic reconnects. nvme_reset_wq runs reset works
 * which also flush works hosted on nvme_wq for serialization purposes.
 * nvme_delete_wq hosts controller deletion works which flush reset
 * works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
static const struct class nvme_class = {
	.name = "nvme",
	.dev_uevent = nvme_class_uevent,
};

static const struct class nvme_subsys_class = {
	.name = "nvme-subsystem",
};

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static const struct class nvme_ns_chr_class = {
	.name = "nvme-generic",
};

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);
static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page,
		u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi);

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes, since
	 * ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

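/*
 * Map an NVMe completion status (SCT/SC) to the closest block layer
 * status so upper layers see a meaningful error class instead of a raw
 * NVMe status value.
 */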
static blk_status_t nvme_error_status(u16 status)
{
	switch (status & NVME_SCT_SC_MASK) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_RESV_CONFLICT;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

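/*
 * Requeue a failed command, honouring the Command Retry Delay (CRD)
 * field: a non-zero CRD selects one of the controller's three CRDT
 * values, which the spec defines in units of 100 milliseconds.
 */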
static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
		       ns->disk ? ns->disk->disk_name : "?",
		       nvme_get_opcode_str(nr->cmd->common.opcode),
		       nr->cmd->common.opcode,
		       nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
		       blk_rq_bytes(req) >> ns->head->lba_shift,
		       nvme_get_error_status_str(nr->status),
		       NVME_SCT(nr->status),		/* Status Code Type */
		       nr->status & NVME_SC_MASK,	/* Status Code */
		       nr->status & NVME_STATUS_MORE ? "MORE " : "",
		       nr->status & NVME_STATUS_DNR ? "DNR " : "");
		return;
	}

	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
			   dev_name(nr->ctrl->device),
			   nvme_get_admin_opcode_str(nr->cmd->common.opcode),
			   nr->cmd->common.opcode,
			   nvme_get_error_status_str(nr->status),
			   NVME_SCT(nr->status),	/* Status Code Type */
			   nr->status & NVME_SC_MASK,	/* Status Code */
			   nr->status & NVME_STATUS_MORE ? "MORE " : "",
			   nr->status & NVME_STATUS_DNR ? "DNR " : "");
}

static void nvme_log_err_passthru(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
		"cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
		ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
		ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
		     nvme_get_admin_opcode_str(nr->cmd->common.opcode),
		nr->cmd->common.opcode,
		nvme_get_error_status_str(nr->status),
		NVME_SCT(nr->status),		/* Status Code Type */
		nr->status & NVME_SC_MASK,	/* Status Code */
		nr->status & NVME_STATUS_MORE ? "MORE " : "",
		nr->status & NVME_STATUS_DNR ? "DNR " : "",
		nr->cmd->common.cdw10,
		nr->cmd->common.cdw11,
		nr->cmd->common.cdw12,
		nr->cmd->common.cdw13,
		nr->cmd->common.cdw14,
		nr->cmd->common.cdw15);
}

enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
	AUTHENTICATE,
};

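/*
 * Decide what to do with a completed request: end it, retry it on the
 * same path, fail it over to another multipath path, or kick off
 * re-authentication and then retry.
 */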
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_STATUS_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED)
		return AUTHENTICATE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}

static inline void nvme_end_req_zoned(struct request *req)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND) {
		struct nvme_ns *ns = req->q->queuedata;

		req->__sector = nvme_lba_to_sect(ns->head,
				le64_to_cpu(nvme_req(req)->result.u64));
	}
}

static inline void __nvme_end_req(struct request *req)
{
	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
		if (blk_rq_is_passthrough(req))
			nvme_log_err_passthru(req);
		else
			nvme_log_error(req);
	}
	nvme_end_req_zoned(req);
	nvme_trace_bio_complete(req);
	if (req->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_end_request(req);
}

void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	__nvme_end_req(req);
	blk_mq_end_request(req, status);
}

void nvme_complete_rq(struct request *req)
{
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	/*
	 * Completions of long-running commands should not be able to
	 * defer sending of periodic keep alives, since the controller
	 * may have completed processing such commands a long time ago
	 * (arbitrarily close to command submission time).
	 * req->deadline - req->timeout is the command submission time
	 * in jiffies.
	 */
	if (ctrl->kas &&
	    req->deadline - req->timeout >= ctrl->ka_last_check_time)
		ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	case AUTHENTICATE:
#ifdef CONFIG_NVME_HOST_AUTH
		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
		nvme_retry_req(req);
#else
		nvme_end_req(req);
#endif
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_complete_batch_req(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);
	__nvme_end_req(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);

/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

bool nvme_cancel_request(struct request *req, void *data)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort a completed or idle request */
	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

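/*
 * Validate and perform a controller state transition under ctrl->lock.
 * Returns true if the transition is legal and was carried out, false
 * otherwise, and starts or stops the failfast timer where appropriate.
 */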
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = nvme_ctrl_state(ctrl);
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		WRITE_ONCE(ctrl->state, new_state);
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (new_state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (new_state == NVME_CTRL_CONNECTING &&
		   old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

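/*
 * Final reference drop on a namespace head: release the multipath disk,
 * the per-subsystem instance number, the SRCU state and the subsystem
 * reference.
 */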
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_put_disk(head);
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head->plids);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, "NVME_TARGET_PASSTHRU");

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	struct nvme_request *nr = nvme_req(req);
	bool logging_enabled;

	if (req->q->queuedata) {
		struct nvme_ns *ns = req->q->disk->private_data;

		logging_enabled = ns->head->passthru_err_log_enabled;
		req->timeout = NVME_IO_TIMEOUT;
	} else { /* no queuedata implies admin queue */
		logging_enabled = nr->ctrl->passthru_err_log_enabled;
		req->timeout = NVME_ADMIN_TIMEOUT;
	}

	if (!logging_enabled)
		req->rq_flags |= RQF_QUIET;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	memcpy(nr->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);

/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	if (state != NVME_CTRL_DELETING_NOIO &&
	    state != NVME_CTRL_DELETING &&
	    state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

770
771bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
6d3c7fb1 772 bool queue_live, enum nvme_ctrl_state state)
a9715744
TC
773{
774 struct nvme_request *req = nvme_req(rq);
775
776 /*
777 * currently we have a problem sending passthru commands
778 * on the admin_q if the controller is not LIVE because we can't
779 * make sure that they are going out after the admin connect,
780 * controller enable and/or other commands in the initialization
781 * sequence. until the controller will be LIVE, fail with
782 * BLK_STS_RESOURCE so that they will be rescheduled.
783 */
784 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
785 return false;
786
787 if (ctrl->ops->flags & NVME_F_FABRICS) {
788 /*
789 * Only allow commands on a live queue, except for the connect
790 * command, which is require to set the queue live in the
791 * appropinquate states.
792 */
6d3c7fb1 793 switch (state) {
a9715744
TC
794 case NVME_CTRL_CONNECTING:
795 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
f50fff73
HR
796 (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
797 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
798 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
a9715744
TC
799 return true;
800 break;
801 default:
802 break;
803 case NVME_CTRL_DEAD:
804 return false;
805 }
806 }
807
808 return queue_live;
809}
810EXPORT_SYMBOL_GPL(__nvme_check_ready);
811
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

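/*
 * Build a Dataset Management (deallocate) command from the discard
 * request, packing one LBA range per bio into the payload.
 */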
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range, fallback to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	if (queue_max_discard_segments(req->q) == 1) {
		u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req));
		u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);

		range[0].cattr = cpu_to_le32(0);
		range[0].nlb = cpu_to_le32(nlb);
		range[0].slba = cpu_to_le64(slba);
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 slba = nvme_sect_to_lba(ns->head,
						    bio->bi_iter.bi_sector);
			u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;

			if (n < segments) {
				range[n].cattr = cpu_to_le32(0);
				range[n].nlb = cpu_to_le32(nlb);
				range[n].slba = cpu_to_le64(slba);
			}
			n++;
		}
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	bvec_set_virt(&req->special_vec, range, alloc_size);
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static void nvme_set_app_tag(struct request *req, struct nvme_command *cmnd)
{
	cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
	cmnd->rw.lbatm = cpu_to_le16(0xffff);
}

static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
			     struct request *req)
{
	u32 upper, lower;
	u64 ref48;

	/* both rw and write zeroes share the same reftag format */
	switch (ns->head->guard_type) {
	case NVME_NVM_NS_16B_GUARD:
		cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
		break;
	case NVME_NVM_NS_64B_GUARD:
		ref48 = ext_pi_ref_tag(req);
		lower = lower_32_bits(ref48);
		upper = upper_32_bits(ref48);

		cmnd->rw.reftag = cpu_to_le32(lower);
		cmnd->rw.cdw3 = cpu_to_le32(upper);
		break;
	default:
		break;
	}
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));

	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);

	if (!(req->cmd_flags & REQ_NOUNMAP) &&
	    (ns->head->features & NVME_NS_DEAC))
		cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);

	if (nvme_ns_has_pi(ns->head)) {
		cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);

		switch (ns->head->pi_type) {
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	return BLK_STS_OK;
}

/*
 * NVMe does not support a dedicated command to issue an atomic write. A write
 * which does not adhere to the device atomic limits will silently be executed
 * non-atomically. The request issuer should ensure that the write is within
 * the queue atomic writes limits, but just validate this in case it is not.
 */
static bool nvme_valid_atomic_write(struct request *req)
{
	struct request_queue *q = req->q;
	u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);

	if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
		return false;

	if (boundary_bytes) {
		u64 mask = boundary_bytes - 1, imask = ~mask;
		u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
		u64 end = start + blk_rq_bytes(req) - 1;

		/* If larger than the boundary size, it must cross a boundary */
		if (blk_rq_bytes(req) > boundary_bytes)
			return false;

		if ((start & imask) != (end & imask))
			return false;
	}

	return true;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	if (op == nvme_cmd_write && ns->head->nr_plids) {
		u16 write_stream = req->bio->bi_write_stream;

		if (WARN_ON_ONCE(write_stream > ns->head->nr_plids))
			return BLK_STS_INVAL;

		if (write_stream) {
			dsmgmt |= ns->head->plids[write_stream - 1] << 16;
			control |= NVME_RW_DTYPE_DPLCMT;
		}
	}

	if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
		return BLK_STS_INVAL;

	cmnd->rw.opcode = op;
	cmnd->rw.flags = 0;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.cdw2 = 0;
	cmnd->rw.cdw3 = 0;
	cmnd->rw.metadata = 0;
	cmnd->rw.slba =
		cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
	cmnd->rw.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
	cmnd->rw.reftag = 0;
	cmnd->rw.lbat = 0;
	cmnd->rw.lbatm = 0;

	if (ns->head->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
		if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
			control |= NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			nvme_set_ref_tag(ns, cmnd, req);
		}
		if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
			control |= NVME_RW_PRINFO_PRCHK_APP;
			nvme_set_app_tag(req, cmnd);
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

		if (req->special_vec.bv_page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
		req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

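/*
 * Translate a block layer request into the matching NVMe command and
 * fill in the shared fields (command id) before submission.
 */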
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP))
		nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* these are setup prior to execution in nvme_init_request() */
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Return values:
 * 0:  success
 * > 0: nvme controller's cqe status response
 * < 0: kernel error in lieu of controller response
 */
int nvme_execute_rq(struct request *rq, bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, "NVME_TARGET_PASSTHRU");

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, nvme_submit_flags_t flags)
{
	struct request *req;
	int ret;
	blk_mq_req_flags_t blk_flags = 0;

	if (flags & NVME_SUBMIT_NOWAIT)
		blk_flags |= BLK_MQ_REQ_NOWAIT;
	if (flags & NVME_SUBMIT_RESERVED)
		blk_flags |= BLK_MQ_REQ_RESERVED;
	if (qid == NVME_QID_ANY)
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
	else
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
						qid - 1);

	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);
	if (flags & NVME_SUBMIT_RETRY)
		req->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
			NVME_QID_ANY, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unusual effects:%08x\n",
				opcode, effects);

		/*
		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
		 * which would deadlock when done on an I/O command.  Note that
		 * we already warn about an unusual effect above.
		 */
		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
	} else {
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);

		/* Ignore execution restrictions if any relaxation bits are set */
		if (effects & NVME_CMD_EFFECTS_CSER_MASK)
			effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
	}

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, "NVME_TARGET_PASSTHRU");

u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, "NVME_TARGET_PASSTHRU");

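/*
 * Undo the freeze taken in nvme_passthru_start() and act on any side
 * effects the passthru command may have had: changed capabilities, a
 * changed namespace inventory, or an updated KATO value.
 */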
void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC) {
		if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
				      &ctrl->flags)) {
			dev_info(ctrl->device,
"controller capabilities changed, reset may be required to take effect.\n");
		}
	}
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
	if (ns)
		return;

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * The keep alive interval on the host should be
			 * updated when KATO is modified by a Set Features
			 * command.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, "NVME_TARGET_PASSTHRU");

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 * The host should send Keep Alive commands at half of the Keep Alive Timeout
 * accounting for transport roundtrip times [..].
 */
static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
{
	unsigned long delay = ctrl->kato * HZ / 2;

	/*
	 * When using Traffic Based Keep Alive, we need to run
	 * nvme_keep_alive_work at twice the normal frequency, as one
	 * command completion can postpone sending a keep alive command
	 * by up to twice the delay between runs.
	 */
	if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
		delay /= 2;
	return delay;
}

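/*
 * Schedule the next keep-alive relative to the last time the keep-alive
 * state was checked, so repeated rescheduling cannot push the next run
 * out indefinitely.
 */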
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	unsigned long now = jiffies;
	unsigned long delay = nvme_keep_alive_work_period(ctrl);
	unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;

	if (time_after(now, ka_next_check_tm))
		delay = 0;
	else
		delay = ka_next_check_tm - now;

	queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}

static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
						 blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
	unsigned long delay = nvme_keep_alive_work_period(ctrl);
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	/*
	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
	 * at the desired frequency.
	 */
	if (rtt <= delay) {
		delay -= rtt;
	} else {
		dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
			 jiffies_to_msecs(rtt));
		delay = 0;
	}

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return RQ_END_IO_NONE;
	}

	ctrl->ka_last_check_time = jiffies;
	ctrl->comp_seen = false;
	if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
	return RQ_END_IO_NONE;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	ctrl->ka_last_check_time = jiffies;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}
	nvme_init_request(rq, &ctrl->ka_cmd);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io = nvme_keep_alive_end_io;
	rq->end_io_data = ctrl;
	blk_execute_rq_nowait(rq, false);
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}

1427
f54f0d0e 1428static bool nvme_id_cns_ok(struct nvme_ctrl *ctrl, u8 cns)
b9a5c3d4 1429{
f54f0d0e
KB
1430 /*
1431 * The CNS field occupies a full byte starting with NVMe 1.2
1432 */
1433 if (ctrl->vs >= NVME_VS(1, 2, 0))
1434 return true;
1435
1436 /*
1437 * NVMe 1.1 expanded the CNS value to two bits, which means values
1438 * larger than that could get truncated and treated as an incorrect
1439 * value.
1440 *
1441 * Qemu implemented 1.0 behavior for controllers claiming 1.1
1442 * compliance, so they need to be quirked here.
1443 */
1444 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1445 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS))
1446 return cns <= 3;
1447
1448 /*
1449 * NVMe 1.0 used a single bit for the CNS value.
1450 */
1451 return cns <= 1;
b9a5c3d4
CH
1452}
1453
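/*
 * Issue an Identify Controller command into a freshly allocated buffer;
 * on failure the buffer is freed and *id is set to NULL.
 */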
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error) {
		kfree(*id);
		*id = NULL;
	}
	return error;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_EUI64_LEN;
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_NGUID_LEN;
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_UUID_LEN;
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(info->nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			info->nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 info->nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}

1581
a1a825ab 1582int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
1a893c2b 1583 struct nvme_id_ns **id)
21d34711
CH
1584{
1585 struct nvme_command c = { };
1586 int error;
1587
1588 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
778f067c
MG
1589 c.identify.opcode = nvme_admin_identify;
1590 c.identify.nsid = cpu_to_le32(nsid);
986994a2 1591 c.identify.cns = NVME_ID_CNS_NS;
21d34711 1592
331813f6
SG
1593 *id = kmalloc(sizeof(**id), GFP_KERNEL);
1594 if (!*id)
1595 return -ENOMEM;
21d34711 1596
331813f6 1597 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
cdbff4f2 1598 if (error) {
d0de579c 1599 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
0dd6fff2 1600 kfree(*id);
7e80eb79 1601 *id = NULL;
cdbff4f2 1602 }
1a893c2b
CH
1603 return error;
1604}
00ff400e 1605
static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_ns_ids *ids = &info->ids;
	struct nvme_id_ns *id;
	int ret;

	ret = nvme_identify_ns(ctrl, info->nsid, &id);
	if (ret)
		return ret;

	if (id->ncap == 0) {
		/* namespace not allocated or attached */
		info->is_removed = true;
		ret = -ENODEV;
		goto error;
	}

	info->anagrpid = id->anagrpid;
	info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
	info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
	info->is_ready = true;
	info->endgid = le16_to_cpu(id->endgid);
	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
		dev_info(ctrl->device,
			 "Ignoring bogus Namespace Identifiers\n");
	} else {
		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
	}

error:
	kfree(id);
	return ret;
}

CH
1646static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
1647 struct nvme_ns_info *info)
354201c5 1648{
1a893c2b 1649 struct nvme_id_ns_cs_indep *id;
354201c5
CH
1650 struct nvme_command c = {
1651 .identify.opcode = nvme_admin_identify,
1a893c2b 1652 .identify.nsid = cpu_to_le32(info->nsid),
354201c5
CH
1653 .identify.cns = NVME_ID_CNS_NS_CS_INDEP,
1654 };
1655 int ret;
1656
1a893c2b
CH
1657 id = kmalloc(sizeof(*id), GFP_KERNEL);
1658 if (!id)
354201c5
CH
1659 return -ENOMEM;
1660
1a893c2b
CH
1661 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1662 if (!ret) {
1663 info->anagrpid = id->anagrpid;
1664 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1e4ea66a 1665 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1a893c2b 1666 info->is_ready = id->nstat & NVME_NSTAT_NRDY;
1d811438 1667 info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL;
8a825d22 1668 info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT;
30b5f20b 1669 info->endgid = le16_to_cpu(id->endgid);
354201c5 1670 }
1a893c2b
CH
1671 kfree(id);
1672 return ret;
354201c5
CH
1673}
1674
1a87ee65
KB
1675static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1676 unsigned int dword11, void *buffer, size_t buflen, u32 *result)
21d34711 1677{
15755854 1678 union nvme_result res = { 0 };
cc72c442 1679 struct nvme_command c = { };
1cb3cce5 1680 int ret;
21d34711 1681
1a87ee65 1682 c.features.opcode = op;
21d34711
CH
1683 c.features.fid = cpu_to_le32(fid);
1684 c.features.dword11 = cpu_to_le32(dword11);
1685
d49187e9 1686 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
bd2687f2 1687 buffer, buflen, NVME_QID_ANY, 0);
9b47f77a 1688 if (ret >= 0 && result)
d49187e9 1689 *result = le32_to_cpu(res.u32);
1cb3cce5 1690 return ret;
21d34711
CH
1691}
1692
1a87ee65
KB
1693int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1694 unsigned int dword11, void *buffer, size_t buflen,
7a044d34 1695 void *result)
1a87ee65
KB
1696{
1697 return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1698 buflen, result);
1699}
1700EXPORT_SYMBOL_GPL(nvme_set_features);
1701
1702int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1703 unsigned int dword11, void *buffer, size_t buflen,
7a044d34 1704 void *result)
1a87ee65
KB
1705{
1706 return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1707 buflen, result);
1708}
1709EXPORT_SYMBOL_GPL(nvme_get_features);
1710
9a0be7ab
CH
1711int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1712{
1713 u32 q_count = (*count - 1) | ((*count - 1) << 16);
1714 u32 result;
1715 int status, nr_io_queues;
1716
1a6fe74d 1717 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
9a0be7ab 1718 &result);
294b2b75
DW
1719
1720 /*
 1721 * It's either a kernel error or the host observed a connection
 1722 * loss. In either case it's not possible to communicate with the
 1723 * controller, and thus we enter the error code path.
1724 */
1725 if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
9a0be7ab
CH
1726 return status;
1727
f5fa90dc
CH
1728 /*
1729 * Degraded controllers might return an error when setting the queue
1730 * count. We still want to be able to bring them online and offer
 1731 * access to the admin queue, as that might be the only way to fix them up.
1732 */
1733 if (status > 0) {
f0425db0 1734 dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
f5fa90dc
CH
1735 *count = 0;
1736 } else {
1737 nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1738 *count = min(*count, nr_io_queues);
1739 }
1740
9a0be7ab
CH
1741 return 0;
1742}
576d55d6 1743EXPORT_SYMBOL_GPL(nvme_set_queue_count);
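/*
 * Worked example (illustrative, not from the spec text): a caller asking
 * for *count = 8 queues encodes dword11 as 0x00070007 (0's based NSQR and
 * NCQR). If the controller answers with result = 0x000f0003 it allocated
 * 4 submission and 16 completion queues, so nr_io_queues becomes
 * min(3, 15) + 1 = 4.
 */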
9a0be7ab 1744
c0561f82 1745#define NVME_AEN_SUPPORTED \
85f8a435
SG
1746 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1747 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
c0561f82
HR
1748
1749static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1750{
fa441b71 1751 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
c0561f82
HR
1752 int status;
1753
fa441b71
WZ
1754 if (!supported_aens)
1755 return;
1756
1757 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1758 NULL, 0, &result);
c0561f82
HR
1759 if (status)
1760 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
fa441b71 1761 supported_aens);
93da4023
SG
1762
1763 queue_work(nvme_wq, &ctrl->async_event_work);
c0561f82
HR
1764}
1765
f5b9a51d 1766static int nvme_ns_open(struct nvme_ns *ns)
c225b610 1767{
c225b610 1768
32acab31 1769 /* should never be called due to GENHD_FL_HIDDEN */
30897388 1770 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
85088c4a 1771 goto fail;
4c74d1f8 1772 if (!nvme_get_ns(ns))
85088c4a
NC
1773 goto fail;
1774 if (!try_module_get(ns->ctrl->ops->module))
1775 goto fail_put_ns;
1776
c6424a90 1777 return 0;
85088c4a
NC
1778
1779fail_put_ns:
1780 nvme_put_ns(ns);
1781fail:
1782 return -ENXIO;
1673f1f0
CH
1783}
1784
f5b9a51d 1785static void nvme_ns_release(struct nvme_ns *ns)
1673f1f0 1786{
85088c4a
NC
1787
1788 module_put(ns->ctrl->ops->module);
1789 nvme_put_ns(ns);
1673f1f0
CH
1790}
1791
05bdb996 1792static int nvme_open(struct gendisk *disk, blk_mode_t mode)
f5b9a51d 1793{
d32e2bf8 1794 return nvme_ns_open(disk->private_data);
f5b9a51d
CH
1795}
1796
ae220766 1797static void nvme_release(struct gendisk *disk)
f5b9a51d
CH
1798{
1799 nvme_ns_release(disk->private_data);
1800}
1801
1496bd49 1802int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1673f1f0
CH
1803{
1804 /* some standard values */
1805 geo->heads = 1 << 6;
1806 geo->sectors = 1 << 5;
1807 geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1808 return 0;
1809}
1810
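/*
 * Illustrative arithmetic for the fake geometry above: 64 heads times
 * 32 sectors/track is 2048 sectors per cylinder, hence the
 * get_capacity() >> 11. The CHS values exist purely for legacy users.
 */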
7ec5bd24 1811static bool nvme_init_integrity(struct nvme_ns_head *head,
6339b7ed 1812 struct queue_limits *lim, struct nvme_ns_info *info)
1673f1f0 1813{
c6e56cf6 1814 struct blk_integrity *bi = &lim->integrity;
1673f1f0 1815
c6e56cf6 1816 memset(bi, 0, sizeof(*bi));
414c62e2 1817
f467b48e
CH
1818 if (!head->ms)
1819 return true;
1820
1821 /*
1822 * PI can always be supported as we can ask the controller to simply
1823 * insert/strip it, which is not possible for other kinds of metadata.
1824 */
1825 if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ||
1826 !(head->features & NVME_NS_METADATA_SUPPORTED))
1827 return nvme_ns_has_pi(head);
1828
d386aedc 1829 switch (head->pi_type) {
1673f1f0 1830 case NVME_NS_DPS_PI_TYPE3:
d386aedc 1831 switch (head->guard_type) {
4020aad8 1832 case NVME_NVM_NS_16B_GUARD:
c6e56cf6
CH
1833 bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
1834 bi->tag_size = sizeof(u16) + sizeof(u32);
1835 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
4020aad8
KB
1836 break;
1837 case NVME_NVM_NS_64B_GUARD:
c6e56cf6
CH
1838 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
1839 bi->tag_size = sizeof(u16) + 6;
1840 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
4020aad8
KB
1841 break;
1842 default:
4020aad8
KB
1843 break;
1844 }
1673f1f0
CH
1845 break;
1846 case NVME_NS_DPS_PI_TYPE1:
1847 case NVME_NS_DPS_PI_TYPE2:
d386aedc 1848 switch (head->guard_type) {
4020aad8 1849 case NVME_NVM_NS_16B_GUARD:
c6e56cf6
CH
1850 bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
1851 bi->tag_size = sizeof(u16);
1852 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
1853 BLK_INTEGRITY_REF_TAG;
4020aad8
KB
1854 break;
1855 case NVME_NVM_NS_64B_GUARD:
c6e56cf6
CH
1856 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
1857 bi->tag_size = sizeof(u16);
1858 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
1859 BLK_INTEGRITY_REF_TAG;
4020aad8
KB
1860 break;
1861 default:
4020aad8
KB
1862 break;
1863 }
1673f1f0
CH
1864 break;
1865 default:
1673f1f0
CH
1866 break;
1867 }
4020aad8 1868
c6e56cf6 1869 bi->tuple_size = head->ms;
6339b7ed 1870 bi->pi_offset = info->pi_offset;
f467b48e 1871 return true;
1673f1f0 1872}
1673f1f0 1873
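/*
 * Illustrative layout (hypothetical values): a namespace formatted with
 * Type 1 protection and a 16-bit guard reporting ms = 8 gets
 * pi_size = 8 and pi_offset = ms - pi_size = 0, and the resulting
 * blk_integrity uses CRC checksums with a 2-byte app tag and an
 * enforced reference tag.
 */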
e6c9b130 1874static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
1673f1f0 1875{
e6c9b130 1876 struct nvme_ctrl *ctrl = ns->ctrl;
3831761e 1877
e6c9b130
CH
1878 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
1879 lim->max_hw_discard_sectors =
1880 nvme_lba_to_sect(ns->head, ctrl->dmrsl);
1881 else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
1882 lim->max_hw_discard_sectors = UINT_MAX;
1883 else
1884 lim->max_hw_discard_sectors = 0;
1885
1886 lim->discard_granularity = lim->logical_block_size;
3831761e 1887
3b946fe1 1888 if (ctrl->dmrl)
e6c9b130 1889 lim->max_discard_segments = ctrl->dmrl;
3b946fe1 1890 else
e6c9b130 1891 lim->max_discard_segments = NVME_DSM_MAX_RANGES;
1673f1f0
CH
1892}
1893
002fab04
CH
1894static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1895{
1896 return uuid_equal(&a->uuid, &b->uuid) &&
1897 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
71010c30
NC
1898 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
1899 a->csi == b->csi;
002fab04
CH
1900}
1901
c6fce9f1
CH
1902static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
1903 struct nvme_id_ns_nvm **nvmp)
d4609ea8 1904{
c6fce9f1
CH
1905 struct nvme_command c = {
1906 .identify.opcode = nvme_admin_identify,
1907 .identify.nsid = cpu_to_le32(nsid),
1908 .identify.cns = NVME_ID_CNS_CS_NS,
1909 .identify.csi = NVME_CSI_NVM,
1910 };
4020aad8 1911 struct nvme_id_ns_nvm *nvm;
c6fce9f1 1912 int ret;
d4609ea8 1913
4020aad8
KB
1914 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
1915 if (!nvm)
1916 return -ENOMEM;
d4609ea8 1917
d386aedc 1918 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
4020aad8 1919 if (ret)
c6fce9f1
CH
1920 kfree(nvm);
1921 else
1922 *nvmp = nvm;
1923 return ret;
1924}
4020aad8 1925
27cb91a3 1926static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
e5ea00a5 1927 struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
d4609ea8 1928{
27cb91a3 1929 u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);
415fb383 1930 u8 guard_type;
4020aad8
KB
1931
1932 /* no support for storage tag formats right now */
1933 if (nvme_elbaf_sts(elbaf))
27cb91a3 1934 return;
4020aad8 1935
415fb383
FP
1936 guard_type = nvme_elbaf_guard_type(elbaf);
1937 if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) &&
1938 guard_type == NVME_NVM_NS_QTYPE_GUARD)
1939 guard_type = nvme_elbaf_qualified_guard_type(elbaf);
1940
1941 head->guard_type = guard_type;
d386aedc 1942 switch (head->guard_type) {
4020aad8 1943 case NVME_NVM_NS_64B_GUARD:
d386aedc 1944 head->pi_size = sizeof(struct crc64_pi_tuple);
4020aad8
KB
1945 break;
1946 case NVME_NVM_NS_16B_GUARD:
d386aedc 1947 head->pi_size = sizeof(struct t10_pi_tuple);
4020aad8
KB
1948 break;
1949 default:
1950 break;
1951 }
4020aad8
KB
1952}
1953
e5ea00a5
CH
1954static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
1955 struct nvme_ns_head *head, struct nvme_id_ns *id,
6339b7ed 1956 struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info)
4020aad8 1957{
d386aedc 1958 head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
27cb91a3
CH
1959 head->pi_type = 0;
1960 head->pi_size = 0;
27cb91a3 1961 head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
d386aedc 1962 if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
e5ea00a5 1963 return;
363f6368 1964
27cb91a3
CH
1965 if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
1966 nvme_configure_pi_elbas(head, id, nvm);
1967 } else {
1968 head->pi_size = sizeof(struct t10_pi_tuple);
1969 head->guard_type = NVME_NVM_NS_16B_GUARD;
1970 }
1971
1972 if (head->pi_size && head->ms >= head->pi_size)
1973 head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
42ab37ea
KB
1974 if (!(id->dps & NVME_NS_DPS_PI_FIRST)) {
1975 if (disable_pi_offsets)
1976 head->pi_type = 0;
1977 else
1978 info->pi_offset = head->ms - head->pi_size;
1979 }
363f6368 1980
d4609ea8
CH
1981 if (ctrl->ops->flags & NVME_F_FABRICS) {
1982 /*
1983 * The NVMe over Fabrics specification only supports metadata as
1984 * part of the extended data LBA. We rely on HCA/HBA support to
1985 * remap the separate metadata buffer from the block layer.
1986 */
1987 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
e5ea00a5 1988 return;
d39ad2a4 1989
d386aedc 1990 head->features |= NVME_NS_EXT_LBAS;
d39ad2a4
KB
1991
1992 /*
1993 * The current fabrics transport drivers support namespace
1994 * metadata formats only if nvme_ns_has_pi() returns true.
1995 * Suppress support for all other formats so the namespace will
1996 * have a 0 capacity and not be usable through the block stack.
1997 *
1998 * Note, this check will need to be modified if any drivers
1999 * gain the ability to use other metadata formats.
2000 */
d386aedc
DW
2001 if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
2002 head->features |= NVME_NS_METADATA_SUPPORTED;
d4609ea8
CH
2003 } else {
2004 /*
2005 * For PCIe controllers, we can't easily remap the separate
2006 * metadata buffer from the block layer and thus require a
2007 * separate metadata buffer for block layer metadata/PI support.
2008 * We allow extended LBAs for the passthrough interface, though.
2009 */
2010 if (id->flbas & NVME_NS_FLBAS_META_EXT)
d386aedc 2011 head->features |= NVME_NS_EXT_LBAS;
d4609ea8 2012 else
d386aedc 2013 head->features |= NVME_NS_METADATA_SUPPORTED;
d4609ea8 2014 }
d4609ea8
CH
2015}
2016
5f9bbea0 2017
b2e607fe
CH
2018static u32 nvme_configure_atomic_write(struct nvme_ns *ns,
2019 struct nvme_id_ns *id, struct queue_limits *lim, u32 bs)
5f9bbea0 2020{
b2e607fe 2021 u32 atomic_bs, boundary = 0;
5f9bbea0 2022
b2e607fe
CH
2023 /*
2024 * We do not support an offset for the atomic boundaries.
2025 */
2026 if (id->nabo)
2027 return bs;
2028
2029 if ((id->nsfeat & NVME_NS_FEAT_ATOMICS) && id->nawupf) {
2030 /*
2031 * Use the per-namespace atomic write unit when available.
2032 */
2033 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
2034 if (id->nabspf)
5f9bbea0 2035 boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
b2e607fe
CH
2036 } else {
2037 /*
 2038 * Use the controller-wide atomic write unit. This sucks
2039 * because the limit is defined in terms of logical blocks while
2040 * namespaces can have different formats, and because there is
2041 * no clear language in the specification prohibiting different
2042 * values for different controllers in the subsystem.
2043 */
f46d2734 2044 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
5f9bbea0 2045 }
b2e607fe 2046
5f9bbea0
AA
2047 lim->atomic_write_hw_max = atomic_bs;
2048 lim->atomic_write_hw_boundary = boundary;
2049 lim->atomic_write_hw_unit_min = bs;
2050 lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
6a7e17b2 2051 lim->features |= BLK_FEAT_ATOMIC_WRITES;
b2e607fe 2052 return atomic_bs;
5f9bbea0
AA
2053}
2054
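/*
 * Worked example (illustrative values): for a 4096-byte LBA format with
 * NAWUPF = 7 and NABSPF = 15, atomic_bs = (1 + 7) * 4096 = 32KiB, the
 * boundary is (15 + 1) * 4096 = 64KiB, and the advertised atomic unit
 * max is rounddown_pow_of_two(32KiB) = 32KiB.
 */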
152694c8 2055static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
658d9f7c 2056{
152694c8
CH
2057 return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
2058}
658d9f7c 2059
e6c9b130
CH
2060static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
2061 struct queue_limits *lim)
658d9f7c 2062{
e6c9b130
CH
2063 lim->max_hw_sectors = ctrl->max_hw_sectors;
2064 lim->max_segments = min_t(u32, USHRT_MAX,
2065 min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
2066 lim->max_integrity_segments = ctrl->max_integrity_segments;
2067 lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
2068 lim->max_segment_size = UINT_MAX;
2069 lim->dma_alignment = 3;
658d9f7c
CH
2070}
2071
e6c9b130
CH
2072static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
2073 struct queue_limits *lim)
24b0b58c 2074{
a5b1cd61 2075 struct nvme_ns_head *head = ns->head;
d386aedc 2076 u32 bs = 1U << head->lba_shift;
68ab60ca 2077 u32 atomic_bs, phys_bs, io_opt = 0;
a5b1cd61 2078 bool valid = true;
24b0b58c 2079
13f0b26b
CH
2080 /*
2081 * The block layer can't support LBA sizes larger than the page size
74fbc88e
KB
2082 * or smaller than a sector size yet, so catch this early and don't
2083 * allow block I/O.
13f0b26b 2084 */
51588b1b 2085 if (blk_validate_block_size(bs)) {
01fa0174 2086 bs = (1 << 9);
a5b1cd61 2087 valid = false;
01fa0174 2088 }
f9d5f457 2089
b2e607fe
CH
2090 phys_bs = bs;
2091 atomic_bs = nvme_configure_atomic_write(ns, id, lim, bs);
31fdad7b 2092
92decf11 2093 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
81adb863 2094 /* NPWG = Namespace Preferred Write Granularity */
31fdad7b 2095 phys_bs = bs * (1 + le16_to_cpu(id->npwg));
81adb863 2096 /* NOWS = Namespace Optimal Write Size */
f3bf25d5
CH
2097 if (id->nows)
2098 io_opt = bs * (1 + le16_to_cpu(id->nows));
81adb863
BVA
2099 }
2100
81adb863
BVA
2101 /*
2102 * Linux filesystems assume writing a single physical block is
2103 * an atomic operation. Hence limit the physical block size to the
2104 * value of the Atomic Write Unit Power Fail parameter.
2105 */
e6c9b130
CH
2106 lim->logical_block_size = bs;
2107 lim->physical_block_size = min(phys_bs, atomic_bs);
2108 lim->io_min = phys_bs;
2109 lim->io_opt = io_opt;
58a0c875
CH
2110 if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
2111 (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
e6c9b130 2112 lim->max_write_zeroes_sectors = UINT_MAX;
63dfa100 2113 else
e6c9b130 2114 lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
a5b1cd61 2115 return valid;
24b0b58c
CH
2116}
2117
1e4ea66a
CH
2118static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
2119{
2120 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
2121}
2122
e83d776f
KB
2123static inline bool nvme_first_scan(struct gendisk *disk)
2124{
2125 /* nvme_alloc_ns() scans the disk prior to adding it */
50b4aecf 2126 return !disk_live(disk);
e83d776f
KB
2127}
2128
e6c9b130
CH
2129static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id,
2130 struct queue_limits *lim)
e83d776f
KB
2131{
2132 struct nvme_ctrl *ctrl = ns->ctrl;
2133 u32 iob;
2134
2135 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
2136 is_power_of_2(ctrl->max_hw_sectors))
2137 iob = ctrl->max_hw_sectors;
2138 else
0372dd4e 2139 iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob));
e83d776f
KB
2140
2141 if (!iob)
2142 return;
2143
2144 if (!is_power_of_2(iob)) {
2145 if (nvme_first_scan(ns->disk))
2146 pr_warn("%s: ignoring unaligned IO boundary:%u\n",
2147 ns->disk->disk_name, iob);
2148 return;
2149 }
2150
2151 if (blk_queue_is_zoned(ns->disk->queue)) {
2152 if (nvme_first_scan(ns->disk))
2153 pr_warn("%s: ignoring zoned namespace IO boundary\n",
2154 ns->disk->disk_name);
2155 return;
2156 }
2157
e6c9b130 2158 lim->chunk_sectors = iob;
e83d776f
KB
2159}
2160
eb867ee9
JG
2161static int nvme_update_ns_info_generic(struct nvme_ns *ns,
2162 struct nvme_ns_info *info)
2163{
e6c9b130 2164 struct queue_limits lim;
1e1a9cec 2165 unsigned int memflags;
e6c9b130
CH
2166 int ret;
2167
e6c9b130
CH
2168 lim = queue_limits_start_update(ns->disk->queue);
2169 nvme_set_ctrl_limits(ns->ctrl, &lim);
473106dd 2170
1e1a9cec 2171 memflags = blk_mq_freeze_queue(ns->disk->queue);
e6c9b130 2172 ret = queue_limits_commit_update(ns->disk->queue, &lim);
eb867ee9 2173 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
1e1a9cec 2174 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
eb867ee9 2175
eb867ee9 2176 /* Hide the block-interface for these devices */
e6c9b130
CH
2177 if (!ret)
2178 ret = -ENODEV;
2179 return ret;
eb867ee9
JG
2180}
2181
30b5f20b
KB
2182static int nvme_query_fdp_granularity(struct nvme_ctrl *ctrl,
2183 struct nvme_ns_info *info, u8 fdp_idx)
2184{
2185 struct nvme_fdp_config_log hdr, *h;
2186 struct nvme_fdp_config_desc *desc;
2187 size_t size = sizeof(hdr);
2188 void *log, *end;
2189 int i, n, ret;
2190
2191 ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0,
2192 NVME_CSI_NVM, &hdr, size, 0, info->endgid);
2193 if (ret) {
2194 dev_warn(ctrl->device,
2195 "FDP configs log header status:0x%x endgid:%d\n", ret,
2196 info->endgid);
2197 return ret;
2198 }
2199
2200 size = le32_to_cpu(hdr.sze);
2201 if (size > PAGE_SIZE * MAX_ORDER_NR_PAGES) {
2202 dev_warn(ctrl->device, "FDP config size too large:%zu\n",
2203 size);
2204 return 0;
2205 }
2206
2207 h = kvmalloc(size, GFP_KERNEL);
2208 if (!h)
2209 return -ENOMEM;
2210
2211 ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0,
2212 NVME_CSI_NVM, h, size, 0, info->endgid);
2213 if (ret) {
2214 dev_warn(ctrl->device,
2215 "FDP configs log status:0x%x endgid:%d\n", ret,
2216 info->endgid);
2217 goto out;
2218 }
2219
2220 n = le16_to_cpu(h->numfdpc) + 1;
2221 if (fdp_idx > n) {
2222 dev_warn(ctrl->device, "FDP index:%d out of range:%d\n",
2223 fdp_idx, n);
2224 /* Proceed without registering FDP streams */
2225 ret = 0;
2226 goto out;
2227 }
2228
2229 log = h + 1;
2230 desc = log;
2231 end = log + size - sizeof(*h);
2232 for (i = 0; i < fdp_idx; i++) {
2233 log += le16_to_cpu(desc->dsze);
2234 desc = log;
2235 if (log >= end) {
2236 dev_warn(ctrl->device,
2237 "FDP invalid config descriptor list\n");
2238 ret = 0;
2239 goto out;
2240 }
2241 }
2242
2243 if (le32_to_cpu(desc->nrg) > 1) {
2244 dev_warn(ctrl->device, "FDP NRG > 1 not supported\n");
2245 ret = 0;
2246 goto out;
2247 }
2248
2249 info->runs = le64_to_cpu(desc->runs);
2250out:
2251 kvfree(h);
2252 return ret;
2253}
2254
2255static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info)
2256{
2257 struct nvme_ns_head *head = ns->head;
2258 struct nvme_ctrl *ctrl = ns->ctrl;
2259 struct nvme_fdp_ruh_status *ruhs;
2260 struct nvme_fdp_config fdp;
2261 struct nvme_command c = {};
2262 size_t size;
38e8397d 2263 int i, ret;
30b5f20b
KB
2264
2265 /*
2266 * The FDP configuration is static for the lifetime of the namespace,
2267 * so return immediately if we've already registered this namespace's
2268 * streams.
2269 */
2270 if (head->nr_plids)
2271 return 0;
2272
2273 ret = nvme_get_features(ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0,
2274 &fdp);
2275 if (ret) {
2276 dev_warn(ctrl->device, "FDP get feature status:0x%x\n", ret);
2277 return ret;
2278 }
2279
2280 if (!(fdp.flags & FDPCFG_FDPE))
2281 return 0;
2282
2283 ret = nvme_query_fdp_granularity(ctrl, info, fdp.fdpcidx);
2284 if (!info->runs)
2285 return ret;
2286
2287 size = struct_size(ruhs, ruhsd, S8_MAX - 1);
2288 ruhs = kzalloc(size, GFP_KERNEL);
2289 if (!ruhs)
2290 return -ENOMEM;
2291
2292 c.imr.opcode = nvme_cmd_io_mgmt_recv;
2293 c.imr.nsid = cpu_to_le32(head->ns_id);
2294 c.imr.mo = NVME_IO_MGMT_RECV_MO_RUHS;
2295 c.imr.numd = cpu_to_le32(nvme_bytes_to_numd(size));
2296 ret = nvme_submit_sync_cmd(ns->queue, &c, ruhs, size);
2297 if (ret) {
2298 dev_warn(ctrl->device, "FDP io-mgmt status:0x%x\n", ret);
2299 goto free;
2300 }
2301
2302 head->nr_plids = le16_to_cpu(ruhs->nruhsd);
38e8397d
KB
2303 if (!head->nr_plids)
2304 goto free;
2305
f3c308b9 2306 head->plids = kcalloc(head->nr_plids, sizeof(*head->plids),
38e8397d
KB
2307 GFP_KERNEL);
2308 if (!head->plids) {
2309 dev_warn(ctrl->device,
2310 "failed to allocate %u FDP placement IDs\n",
2311 head->nr_plids);
2312 head->nr_plids = 0;
2313 ret = -ENOMEM;
2314 goto free;
2315 }
2316
2317 for (i = 0; i < head->nr_plids; i++)
2318 head->plids[i] = le16_to_cpu(ruhs->ruhsd[i].pid);
30b5f20b
KB
2319free:
2320 kfree(ruhs);
2321 return ret;
2322}
2323
1a893c2b
CH
2324static int nvme_update_ns_info_block(struct nvme_ns *ns,
2325 struct nvme_ns_info *info)
ac81bfa9 2326{
e6c9b130 2327 struct queue_limits lim;
e5ea00a5 2328 struct nvme_id_ns_nvm *nvm = NULL;
c85c9ab9 2329 struct nvme_zone_info zi = {};
1a893c2b 2330 struct nvme_id_ns *id;
1e1a9cec 2331 unsigned int memflags;
a5b1cd61 2332 sector_t capacity;
1a893c2b 2333 unsigned lbaf;
240e6ee2 2334 int ret;
1673f1f0 2335
1a893c2b
CH
2336 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
2337 if (ret)
2338 return ret;
2339
d8b90d60
EM
2340 if (id->ncap == 0) {
2341 /* namespace not allocated or attached */
2342 info->is_removed = true;
0551ec93 2343 ret = -ENXIO;
46e7422c 2344 goto out;
d8b90d60 2345 }
c85c9ab9 2346 lbaf = nvme_lbaf_index(id->flbas);
d8b90d60 2347
e5ea00a5
CH
2348 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
2349 ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
2350 if (ret < 0)
2351 goto out;
d8b90d60
EM
2352 }
2353
c85c9ab9
CH
2354 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
2355 ns->head->ids.csi == NVME_CSI_ZNS) {
2356 ret = nvme_query_zone_info(ns, lbaf, &zi);
2357 if (ret < 0)
2358 goto out;
2359 }
2360
30b5f20b
KB
2361 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_FDPS) {
2362 ret = nvme_query_fdp_info(ns, info);
2363 if (ret < 0)
2364 goto out;
2365 }
2366
473106dd
CH
2367 lim = queue_limits_start_update(ns->disk->queue);
2368
1e1a9cec 2369 memflags = blk_mq_freeze_queue(ns->disk->queue);
9419e71b 2370 ns->head->lba_shift = id->lbaf[lbaf].ds;
a1a825ab 2371 ns->head->nuse = le64_to_cpu(id->nuse);
a5b1cd61 2372 capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
e6c9b130 2373 nvme_set_ctrl_limits(ns->ctrl, &lim);
6339b7ed 2374 nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
e6c9b130
CH
2375 nvme_set_chunk_sectors(ns, id, &lim);
2376 if (!nvme_update_disk_info(ns, id, &lim))
a5b1cd61 2377 capacity = 0;
8695f060 2378
e6c9b130
CH
2379 nvme_config_discard(ns, &lim);
2380 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
c85c9ab9
CH
2381 ns->head->ids.csi == NVME_CSI_ZNS)
2382 nvme_update_zone_info(ns, &lim, &zi);
a5b1cd61 2383
8a825d22 2384 if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc)
1122c0c1
CH
2385 lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
2386 else
2387 lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
a5b1cd61 2388
1d811438
WY
2389 if (info->is_rotational)
2390 lim.features |= BLK_FEAT_ROTATIONAL;
2391
a5b1cd61
CH
2392 /*
2393 * Register a metadata profile for PI, or the plain non-integrity NVMe
2394 * metadata masquerading as Type 0 if supported, otherwise reject block
2395 * I/O to namespaces with metadata except when the namespace supports
2396 * PI, as it can strip/insert in that case.
2397 */
6339b7ed 2398 if (!nvme_init_integrity(ns->head, &lim, info))
a5b1cd61
CH
2399 capacity = 0;
2400
30b5f20b
KB
2401 lim.max_write_streams = ns->head->nr_plids;
2402 if (lim.max_write_streams)
86b6e0bd 2403 lim.write_stream_granularity = min(info->runs, U32_MAX);
30b5f20b
KB
2404 else
2405 lim.write_stream_granularity = 0;
2406
c6e56cf6
CH
2407 ret = queue_limits_commit_update(ns->disk->queue, &lim);
2408 if (ret) {
1e1a9cec 2409 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
c6e56cf6
CH
2410 goto out;
2411 }
2412
a5b1cd61 2413 set_capacity_and_notify(ns->disk, capacity);
71010c30 2414
1b96f862
CH
2415 /*
2416 * Only set the DEAC bit if the device guarantees that reads from
2417 * deallocated data return zeroes. While the DEAC bit does not
2418 * require that, it must be a no-op if reads from deallocated data
2419 * do not return zeroes.
2420 */
2421 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
9419e71b 2422 ns->head->features |= NVME_NS_DEAC;
1e4ea66a 2423 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
e7d65803 2424 set_bit(NVME_NS_READY, &ns->flags);
1e1a9cec 2425 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
1673f1f0 2426
3a9967ba 2427 if (blk_queue_is_zoned(ns->queue)) {
9b3c08b9 2428 ret = blk_revalidate_disk_zones(ns->disk);
8685699c 2429 if (ret && !nvme_first_scan(ns->disk))
e06b425b 2430 goto out;
b29f8485
MG
2431 }
2432
e06b425b
CH
2433 ret = 0;
2434out:
e5ea00a5 2435 kfree(nvm);
1a893c2b 2436 kfree(id);
240e6ee2
KB
2437 return ret;
2438}
2439
1a893c2b
CH
2440static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
2441{
46e7422c
CH
2442 bool unsupported = false;
2443 int ret;
2444
1a893c2b
CH
2445 switch (info->ids.csi) {
2446 case NVME_CSI_ZNS:
2447 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
eb867ee9
JG
2448 dev_info(ns->ctrl->device,
2449 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
1a893c2b 2450 info->nsid);
46e7422c
CH
2451 ret = nvme_update_ns_info_generic(ns, info);
2452 break;
1a893c2b 2453 }
46e7422c
CH
2454 ret = nvme_update_ns_info_block(ns, info);
2455 break;
1a893c2b 2456 case NVME_CSI_NVM:
46e7422c
CH
2457 ret = nvme_update_ns_info_block(ns, info);
2458 break;
1a893c2b 2459 default:
eb867ee9
JG
2460 dev_info(ns->ctrl->device,
2461 "block device for nsid %u not supported (csi %u)\n",
2462 info->nsid, info->ids.csi);
46e7422c
CH
2463 ret = nvme_update_ns_info_generic(ns, info);
2464 break;
2465 }
2466
2467 /*
 2468 * If probing fails due to an unsupported feature, hide the block device,
2469 * but still allow other access.
2470 */
2471 if (ret == -ENODEV) {
2472 ns->disk->flags |= GENHD_FL_HIDDEN;
2473 set_bit(NVME_NS_READY, &ns->flags);
2474 unsupported = true;
2475 ret = 0;
1a893c2b 2476 }
46e7422c
CH
2477
2478 if (!ret && nvme_ns_head_multipath(ns->head)) {
ac229a2d 2479 struct queue_limits *ns_lim = &ns->disk->queue->limits;
f7e0a545 2480 struct queue_limits lim;
1e1a9cec 2481 unsigned int memflags;
f7e0a545 2482
473106dd 2483 lim = queue_limits_start_update(ns->head->disk->queue);
1e1a9cec 2484 memflags = blk_mq_freeze_queue(ns->head->disk->queue);
ac229a2d
CH
2485 /*
2486 * queue_limits mixes values that are the hardware limitations
2487 * for bio splitting with what is the device configuration.
2488 *
2489 * For NVMe the device configuration can change after e.g. a
2490 * Format command, and we really want to pick up the new format
2491 * value here. But we must still stack the queue limits to the
2492 * least common denominator for multipathing to split the bios
2493 * properly.
2494 *
2495 * To work around this, we explicitly set the device
2496 * configuration to those that we just queried, but only stack
2497 * the splitting limits in to make sure we still obey possibly
2498 * lower limitations of other controllers.
2499 */
ac229a2d
CH
2500 lim.logical_block_size = ns_lim->logical_block_size;
2501 lim.physical_block_size = ns_lim->physical_block_size;
2502 lim.io_min = ns_lim->io_min;
2503 lim.io_opt = ns_lim->io_opt;
f7e0a545
CH
2504 queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
2505 ns->head->disk->disk_name);
c6e56cf6
CH
2506 if (unsupported)
2507 ns->head->disk->flags |= GENHD_FL_HIDDEN;
2508 else
6339b7ed 2509 nvme_init_integrity(ns->head, &lim, info);
30b5f20b
KB
2510 lim.max_write_streams = ns_lim->max_write_streams;
2511 lim.write_stream_granularity = ns_lim->write_stream_granularity;
f7e0a545 2512 ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
c6e56cf6
CH
2513
2514 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
2515 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
2516 nvme_mpath_revalidate_paths(ns);
2517
1e1a9cec 2518 blk_mq_unfreeze_queue(ns->head->disk->queue, memflags);
1a893c2b 2519 }
46e7422c
CH
2520
2521 return ret;
1a893c2b
CH
2522}
2523
18f03a06
CH
2524int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
2525 enum blk_unique_id type)
2526{
2527 struct nvme_ns_ids *ids = &ns->head->ids;
2528
2529 if (type != BLK_UID_EUI64)
2530 return -EINVAL;
2531
2532 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) {
2533 memcpy(id, &ids->nguid, sizeof(ids->nguid));
2534 return sizeof(ids->nguid);
2535 }
2536 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) {
2537 memcpy(id, &ids->eui64, sizeof(ids->eui64));
2538 return sizeof(ids->eui64);
2539 }
2540
2541 return -EINVAL;
2542}
2543
2544static int nvme_get_unique_id(struct gendisk *disk, u8 id[16],
2545 enum blk_unique_id type)
2546{
2547 return nvme_ns_get_unique_id(disk->private_data, id, type);
2548}
2549
a98e58e5 2550#ifdef CONFIG_BLK_SED_OPAL
94cc781f 2551static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
4f1244c8 2552 bool send)
a98e58e5 2553{
4f1244c8 2554 struct nvme_ctrl *ctrl = data;
cc72c442 2555 struct nvme_command cmd = { };
a98e58e5 2556
a98e58e5
SB
2557 if (send)
2558 cmd.common.opcode = nvme_admin_security_send;
2559 else
2560 cmd.common.opcode = nvme_admin_security_recv;
a98e58e5 2561 cmd.common.nsid = 0;
b7c8f366
CK
2562 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
2563 cmd.common.cdw11 = cpu_to_le32(len);
a98e58e5 2564
6b46fa02 2565 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
bd2687f2 2566 NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
a98e58e5 2567}
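/*
 * Illustrative encoding: an Opal request with secp = 0x01 and
 * spsp = 0x0001 yields cdw10 = 0x01000100 above, with cdw11 carrying
 * the transfer length in bytes.
 */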
94cc781f
CH
2568
2569static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2570{
2571 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
2572 if (!ctrl->opal_dev)
2573 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
2574 else if (was_suspended)
2575 opal_unlock_from_suspend(ctrl->opal_dev);
2576 } else {
2577 free_opal_dev(ctrl->opal_dev);
2578 ctrl->opal_dev = NULL;
2579 }
2580}
2581#else
2582static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2583{
2584}
a98e58e5
SB
2585#endif /* CONFIG_BLK_SED_OPAL */
2586
8b4fb0f9
CH
2587#ifdef CONFIG_BLK_DEV_ZONED
2588static int nvme_report_zones(struct gendisk *disk, sector_t sector,
2589 unsigned int nr_zones, report_zones_cb cb, void *data)
2590{
2591 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
2592 data);
2593}
2594#else
2595#define nvme_report_zones NULL
2596#endif /* CONFIG_BLK_DEV_ZONED */
2597
942e21c0 2598const struct block_device_operations nvme_bdev_ops = {
1673f1f0
CH
2599 .owner = THIS_MODULE,
2600 .ioctl = nvme_ioctl,
a25d4261 2601 .compat_ioctl = blkdev_compat_ptr_ioctl,
1673f1f0
CH
2602 .open = nvme_open,
2603 .release = nvme_release,
2604 .getgeo = nvme_getgeo,
18f03a06 2605 .get_unique_id = nvme_get_unique_id,
240e6ee2 2606 .report_zones = nvme_report_zones,
1673f1f0
CH
2607 .pr_ops = &nvme_pr_ops,
2608};
2609
e6d275de
CH
2610static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
2611 u32 timeout, const char *op)
5fd4ce1b 2612{
e6d275de
CH
2613 unsigned long timeout_jiffies = jiffies + timeout * HZ;
2614 u32 csts;
5fd4ce1b
CH
2615 int ret;
2616
2617 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
0df1e4f5
KB
2618 if (csts == ~0)
2619 return -ENODEV;
e6d275de 2620 if ((csts & mask) == val)
5fd4ce1b
CH
2621 break;
2622
3e98c244 2623 usleep_range(1000, 2000);
5fd4ce1b
CH
2624 if (fatal_signal_pending(current))
2625 return -EINTR;
354201c5 2626 if (time_after(jiffies, timeout_jiffies)) {
1b3c47c1 2627 dev_err(ctrl->device,
94d2e705 2628 "Device not ready; aborting %s, CSTS=0x%x\n",
e6d275de 2629 op, csts);
5fd4ce1b
CH
2630 return -ENODEV;
2631 }
2632 }
2633
2634 return ret;
2635}
2636
285b6e9b 2637int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
5fd4ce1b
CH
2638{
2639 int ret;
2640
2641 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
285b6e9b
CH
2642 if (shutdown)
2643 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2644 else
2645 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
5fd4ce1b
CH
2646
2647 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2648 if (ret)
2649 return ret;
54adc010 2650
285b6e9b
CH
2651 if (shutdown) {
2652 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
2653 NVME_CSTS_SHST_CMPLT,
2654 ctrl->shutdown_timeout, "shutdown");
2655 }
b5a10c5f 2656 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
54adc010 2657 msleep(NVME_QUIRK_DELAY_AMOUNT);
e6d275de
CH
2658 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0,
2659 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
5fd4ce1b 2660}
576d55d6 2661EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
5fd4ce1b 2662
c0f2f45b 2663int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
5fd4ce1b 2664{
6c3c05b0 2665 unsigned dev_page_min;
354201c5 2666 u32 timeout;
5fd4ce1b
CH
2667 int ret;
2668
c0f2f45b
SG
2669 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2670 if (ret) {
2671 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2672 return ret;
2673 }
2674 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2675
6c3c05b0 2676 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
1b3c47c1 2677 dev_err(ctrl->device,
5fd4ce1b 2678 "Minimum device page size %u too large for host (%u)\n",
6c3c05b0 2679 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
5fd4ce1b
CH
2680 return -ENODEV;
2681 }
2682
71010c30
NC
2683 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2684 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2685 else
2686 ctrl->ctrl_config = NVME_CC_CSS_NVM;
354201c5 2687
0ce96a67
GJ
2688 /*
2689 * Setting CRIME results in CSTS.RDY before the media is ready. This
2690 * makes it possible for media related commands to return the error
2691 * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is
2692 * restructured to handle retries, disable CC.CRIME.
2693 */
2694 ctrl->ctrl_config &= ~NVME_CC_CRIME;
354201c5 2695
6c3c05b0 2696 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
60b43f62 2697 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
5fd4ce1b 2698 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
aa41d2fe
NC
2699 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2700 if (ret)
2701 return ret;
5fd4ce1b 2702
6cc834ba
KB
2703 /* CAP value may change after initial CC write */
2704 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2705 if (ret)
2706 return ret;
2707
2708 timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2709 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2710 u32 crto, ready_timeout;
2711
2712 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2713 if (ret) {
2714 dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2715 ret);
2716 return ret;
2717 }
2718
2719 /*
 2720 * CRTO should always be greater than or equal to CAP.TO, but some
2721 * devices are known to get this wrong. Use the larger of the
2722 * two values.
2723 */
0ce96a67 2724 ready_timeout = NVME_CRTO_CRWMT(crto);
6cc834ba
KB
2725
2726 if (ready_timeout < timeout)
2727 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
2728 crto, ctrl->cap);
2729 else
2730 timeout = ready_timeout;
2731 }
2732
aa41d2fe 2733 ctrl->ctrl_config |= NVME_CC_ENABLE;
5fd4ce1b
CH
2734 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2735 if (ret)
2736 return ret;
e6d275de
CH
2737 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY,
2738 (timeout + 1) / 2, "initialisation");
5fd4ce1b 2739}
576d55d6 2740EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
5fd4ce1b 2741
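/*
 * Illustrative timing for nvme_enable_ctrl(): CAP.TO is in 500ms units,
 * so a controller reporting CAP.TO = 30 is given (30 + 1) / 2 = 15
 * seconds to set CSTS.RDY. Likewise CC.MPS is 0's based in 4KiB units,
 * so NVME_CTRL_PAGE_SHIFT = 12 encodes MPS = 0.
 */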
dbf86b39
JD
2742static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2743{
2744 __le64 ts;
2745 int ret;
2746
2747 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2748 return 0;
2749
2750 ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2751 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2752 NULL);
2753 if (ret)
2754 dev_warn_once(ctrl->device,
2755 "could not set timestamp (%d)\n", ret);
2756 return ret;
2757}
2758
4020aad8 2759static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
49cd84b6
KB
2760{
2761 struct nvme_feat_host_behavior *host;
4020aad8 2762 u8 acre = 0, lbafee = 0;
49cd84b6
KB
2763 int ret;
2764
2765 /* Don't bother enabling the feature if retry delay is not reported */
4020aad8
KB
2766 if (ctrl->crdt[0])
2767 acre = NVME_ENABLE_ACRE;
2768 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
2769 lbafee = NVME_ENABLE_LBAFEE;
2770
2771 if (!acre && !lbafee)
49cd84b6
KB
2772 return 0;
2773
2774 host = kzalloc(sizeof(*host), GFP_KERNEL);
2775 if (!host)
2776 return 0;
2777
4020aad8
KB
2778 host->acre = acre;
2779 host->lbafee = lbafee;
49cd84b6
KB
2780 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2781 host, sizeof(*host), NULL);
2782 kfree(host);
2783 return ret;
2784}
2785
ebd8a93a
AB
2786/*
 2787 * This function checks whether the given total (exlat + enlat) latency of
 2788 * a power state allows that state to be used as an APST transition target.
2789 * It does so by comparing the latency to the primary and secondary latency
2790 * tolerances defined by module params. If there's a match, the corresponding
2791 * timeout value is returned and the matching tolerance index (1 or 2) is
2792 * reported.
2793 */
2794static bool nvme_apst_get_transition_time(u64 total_latency,
2795 u64 *transition_time, unsigned *last_index)
2796{
2797 if (total_latency <= apst_primary_latency_tol_us) {
2798 if (*last_index == 1)
2799 return false;
2800 *last_index = 1;
2801 *transition_time = apst_primary_timeout_ms;
2802 return true;
2803 }
2804 if (apst_secondary_timeout_ms &&
2805 total_latency <= apst_secondary_latency_tol_us) {
2806 if (*last_index <= 2)
2807 return false;
2808 *last_index = 2;
2809 *transition_time = apst_secondary_timeout_ms;
2810 return true;
2811 }
2812 return false;
2813}
2814
60df5de9
CH
2815/*
2816 * APST (Autonomous Power State Transition) lets us program a table of power
2817 * state transitions that the controller will perform automatically.
ebd8a93a
AB
2818 *
2819 * Depending on module params, one of the two supported techniques will be used:
2820 *
2821 * - If the parameters provide explicit timeouts and tolerances, they will be
2822 * used to build a table with up to 2 non-operational states to transition to.
2823 * The default parameter values were selected based on the values used by
2824 * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
2825 * regeneration of the APST table in the event of switching between external
2826 * and battery power, the timeouts and tolerances reflect a compromise
2827 * between values used by Microsoft for AC and battery scenarios.
2828 * - If not, we'll configure the table with a simple heuristic: we are willing
2829 * to spend at most 2% of the time transitioning between power states.
2830 * Therefore, when running in any given state, we will enter the next
2831 * lower-power non-operational state after waiting 50 * (enlat + exlat)
2832 * microseconds, as long as that state's exit latency is under the requested
2833 * maximum latency.
60df5de9
CH
2834 *
2835 * We will not autonomously enter any non-operational state for which the total
2836 * latency exceeds ps_max_latency_us.
2837 *
2838 * Users can set ps_max_latency_us to zero to turn off APST.
2839 */
634b8325 2840static int nvme_configure_apst(struct nvme_ctrl *ctrl)
c5552fde 2841{
c5552fde 2842 struct nvme_feat_auto_pst *table;
60df5de9 2843 unsigned apste = 0;
fb0dc399 2844 u64 max_lat_us = 0;
60df5de9 2845 __le64 target = 0;
fb0dc399 2846 int max_ps = -1;
60df5de9 2847 int state;
c5552fde 2848 int ret;
ebd8a93a 2849 unsigned last_lt_index = UINT_MAX;
c5552fde
AL
2850
2851 /*
2852 * If APST isn't supported or if we haven't been initialized yet,
2853 * then don't do anything.
2854 */
2855 if (!ctrl->apsta)
634b8325 2856 return 0;
c5552fde
AL
2857
2858 if (ctrl->npss > 31) {
2859 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
634b8325 2860 return 0;
c5552fde
AL
2861 }
2862
2863 table = kzalloc(sizeof(*table), GFP_KERNEL);
2864 if (!table)
634b8325 2865 return 0;
c5552fde 2866
76a5af84 2867 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
c5552fde 2868 /* Turn off APST. */
fb0dc399 2869 dev_dbg(ctrl->device, "APST disabled\n");
60df5de9
CH
2870 goto done;
2871 }
c5552fde 2872
60df5de9
CH
2873 /*
2874 * Walk through all states from lowest- to highest-power.
2875 * According to the spec, lower-numbered states use more power. NPSS,
2876 * despite the name, is the index of the lowest-power state, not the
2877 * number of states.
2878 */
2879 for (state = (int)ctrl->npss; state >= 0; state--) {
2880 u64 total_latency_us, exit_latency_us, transition_ms;
da87591b 2881
60df5de9
CH
2882 if (target)
2883 table->entries[state] = target;
c5552fde 2884
c5552fde 2885 /*
60df5de9
CH
2886 * Don't allow transitions to the deepest state if it's quirked
2887 * off.
c5552fde 2888 */
60df5de9
CH
2889 if (state == ctrl->npss &&
2890 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2891 continue;
fb0dc399 2892
60df5de9
CH
2893 /*
2894 * Is this state a useful non-operational state for higher-power
2895 * states to autonomously transition to?
2896 */
2897 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2898 continue;
fb0dc399 2899
60df5de9
CH
2900 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2901 if (exit_latency_us > ctrl->ps_max_latency_us)
2902 continue;
c5552fde 2903
60df5de9
CH
2904 total_latency_us = exit_latency_us +
2905 le32_to_cpu(ctrl->psd[state].entry_lat);
fb0dc399 2906
60df5de9 2907 /*
ebd8a93a
AB
2908 * This state is good. It can be used as the APST idle target
2909 * for higher power states.
60df5de9 2910 */
ebd8a93a
AB
2911 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
2912 if (!nvme_apst_get_transition_time(total_latency_us,
2913 &transition_ms, &last_lt_index))
2914 continue;
2915 } else {
2916 transition_ms = total_latency_us + 19;
2917 do_div(transition_ms, 20);
2918 if (transition_ms > (1 << 24) - 1)
2919 transition_ms = (1 << 24) - 1;
2920 }
60df5de9
CH
2921
2922 target = cpu_to_le64((state << 3) | (transition_ms << 8));
2923 if (max_ps == -1)
2924 max_ps = state;
2925 if (total_latency_us > max_lat_us)
2926 max_lat_us = total_latency_us;
c5552fde
AL
2927 }
2928
60df5de9
CH
2929 if (max_ps == -1)
2930 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2931 else
2932 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2933 max_ps, max_lat_us, (int)sizeof(*table), table);
2934 apste = 1;
2935
2936done:
c5552fde
AL
2937 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2938 table, sizeof(*table), NULL);
2939 if (ret)
2940 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
c5552fde 2941 kfree(table);
634b8325 2942 return ret;
c5552fde
AL
2943}
2944
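/*
 * Worked example of the 2% heuristic above (illustrative): a state with
 * enlat + exlat = 10000us gets transition_ms = (10000 + 19) / 20 = 500,
 * and its table entry is (state << 3) | (500 << 8), i.e. the target
 * power state in bits 7:3 and the idle time in milliseconds in
 * bits 31:8.
 */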
2945static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2946{
2947 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2948 u64 latency;
2949
2950 switch (val) {
2951 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2952 case PM_QOS_LATENCY_ANY:
2953 latency = U64_MAX;
2954 break;
2955
2956 default:
2957 latency = val;
2958 }
2959
2960 if (ctrl->ps_max_latency_us != latency) {
2961 ctrl->ps_max_latency_us = latency;
e6e7f7ac 2962 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
53fe2a30 2963 nvme_configure_apst(ctrl);
c5552fde
AL
2964 }
2965}
2966
bd4da3ab
AL
2967struct nvme_core_quirk_entry {
2968 /*
2969 * NVMe model and firmware strings are padded with spaces. For
2970 * simplicity, strings in the quirk table are padded with NULLs
2971 * instead.
2972 */
2973 u16 vid;
2974 const char *mn;
2975 const char *fr;
2976 unsigned long quirks;
2977};
2978
2979static const struct nvme_core_quirk_entry core_quirks[] = {
c5552fde 2980 {
be56945c
AL
2981 /*
2982 * This Toshiba device seems to die using any APST states. See:
2983 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2984 */
2985 .vid = 0x1179,
2986 .mn = "THNSF5256GPUK TOSHIBA",
c5552fde 2987 .quirks = NVME_QUIRK_NO_APST,
cb32de1b
ML
2988 },
2989 {
2990 /*
2991 * This LiteON CL1-3D*-Q11 firmware version has a race
 2992 * condition associated with actions related to suspend to idle.
 2993 * LiteON has resolved the problem in later firmware.
2994 */
2995 .vid = 0x14a4,
2996 .fr = "22301111",
2997 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
5a6254d5
EM
2998 },
2999 {
3000 /*
3001 * This Kioxia CD6-V Series / HPE PE8030 device times out and
3002 * aborts I/O during any load, but more easily reproducible
3003 * with discards (fstrim).
3004 *
3005 * The device is left in a state where it is also not possible
3006 * to use "nvme set-feature" to disable APST, but booting with
3007 * nvme_core.default_ps_max_latency=0 works.
3008 */
3009 .vid = 0x1e0f,
3010 .mn = "KCD6XVUL6T40",
3011 .quirks = NVME_QUIRK_NO_APST,
e6487833
CH
3012 },
3013 {
3014 /*
3015 * The external Samsung X5 SSD fails initialization without a
3016 * delay before checking if it is ready and has a whole set of
3017 * other problems. To make this even more interesting, it
3018 * shares the PCI ID with internal Samsung 970 Evo Plus that
3019 * does not need or want these quirks.
3020 */
3021 .vid = 0x144d,
3022 .mn = "Samsung Portable SSD X5",
3023 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
3024 NVME_QUIRK_NO_DEEPEST_PS |
3025 NVME_QUIRK_IGNORE_DEV_SUBNQN,
be56945c 3026 }
bd4da3ab
AL
3027};
3028
3029/* match is null-terminated but idstr is space-padded. */
3030static bool string_matches(const char *idstr, const char *match, size_t len)
3031{
3032 size_t matchlen;
3033
3034 if (!match)
3035 return true;
3036
3037 matchlen = strlen(match);
3038 WARN_ON_ONCE(matchlen > len);
3039
3040 if (memcmp(idstr, match, matchlen))
3041 return false;
3042
3043 for (; matchlen < len; matchlen++)
3044 if (idstr[matchlen] != ' ')
3045 return false;
3046
3047 return true;
3048}
3049
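/*
 * Illustrative match: the space-padded idstr "THNSF5256GPUK TOSHIBA   "
 * matches the NUL-terminated quirk string "THNSF5256GPUK TOSHIBA"
 * because every byte past matchlen is ' '.
 */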
3050static bool quirk_matches(const struct nvme_id_ctrl *id,
3051 const struct nvme_core_quirk_entry *q)
3052{
3053 return q->vid == le16_to_cpu(id->vid) &&
3054 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
3055 string_matches(id->fr, q->fr, sizeof(id->fr));
3056}
3057
ab9e00cc
CH
3058static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
3059 struct nvme_id_ctrl *id)
180de007
CH
3060{
3061 size_t nqnlen;
3062 int off;
3063
6299358d
JD
 3064 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
3065 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
3066 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
a8817cc0 3067 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
6299358d
JD
3068 return;
3069 }
180de007 3070
6299358d
JD
3071 if (ctrl->vs >= NVME_VS(1, 2, 1))
3072 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
3073 }
180de007 3074
1abc6961
LB
3075 /*
3076 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
3077 * Base Specification 2.0. It is slightly different from the format
3078 * specified there due to historic reasons, and we can't change it now.
3079 */
ab9e00cc 3080 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
3da584f5 3081 "nqn.2014.08.org.nvmexpress:%04x%04x",
180de007 3082 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
ab9e00cc 3083 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
180de007 3084 off += sizeof(id->sn);
ab9e00cc 3085 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
180de007 3086 off += sizeof(id->mn);
ab9e00cc
CH
3087 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
3088}
3089
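/*
 * Illustrative result (hypothetical identify data): VID 0x1179 and
 * SSVID 0x1179 produce "nqn.2014.08.org.nvmexpress:11791179", followed
 * by the raw space-padded serial and model bytes, with the remainder of
 * subnqn zeroed.
 */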
e654dfd3 3090static void nvme_release_subsystem(struct device *dev)
ab9e00cc 3091{
e654dfd3
LG
3092 struct nvme_subsystem *subsys =
3093 container_of(dev, struct nvme_subsystem, dev);
3094
733e4b69 3095 if (subsys->instance >= 0)
8b850475 3096 ida_free(&nvme_instance_ida, subsys->instance);
ab9e00cc
CH
3097 kfree(subsys);
3098}
3099
ab9e00cc
CH
3100static void nvme_destroy_subsystem(struct kref *ref)
3101{
3102 struct nvme_subsystem *subsys =
3103 container_of(ref, struct nvme_subsystem, ref);
3104
3105 mutex_lock(&nvme_subsystems_lock);
3106 list_del(&subsys->entry);
3107 mutex_unlock(&nvme_subsystems_lock);
3108
ed754e5d 3109 ida_destroy(&subsys->ns_ida);
ab9e00cc
CH
3110 device_del(&subsys->dev);
3111 put_device(&subsys->dev);
3112}
3113
3114static void nvme_put_subsystem(struct nvme_subsystem *subsys)
3115{
3116 kref_put(&subsys->ref, nvme_destroy_subsystem);
3117}
3118
3119static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
3120{
3121 struct nvme_subsystem *subsys;
3122
3123 lockdep_assert_held(&nvme_subsystems_lock);
3124
c26aa572
JS
3125 /*
3126 * Fail matches for discovery subsystems. This results
 3127 * in each discovery controller being bound to a unique subsystem.
3128 * This avoids issues with validating controller values
3129 * that can only be true when there is a single unique subsystem.
3130 * There may be multiple and completely independent entities
3131 * that provide discovery controllers.
3132 */
3133 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
3134 return NULL;
3135
ab9e00cc
CH
3136 list_for_each_entry(subsys, &nvme_subsystems, entry) {
3137 if (strcmp(subsys->subnqn, subsysnqn))
3138 continue;
3139 if (!kref_get_unless_zero(&subsys->ref))
3140 continue;
3141 return subsys;
3142 }
3143
3144 return NULL;
3145}
3146
5ab25a32
SG
3147static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
3148{
3149 return ctrl->opts && ctrl->opts->discovery_nqn;
3150}
3151
1b1031ca
CH
3152static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
3153 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
b837b283 3154{
1b1031ca 3155 struct nvme_ctrl *tmp;
b837b283 3156
32fd90c4
CH
3157 lockdep_assert_held(&nvme_subsystems_lock);
3158
1b1031ca 3159 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
e7c43fea 3160 if (nvme_state_terminal(tmp))
1b1031ca
CH
3161 continue;
3162
3163 if (tmp->cntlid == ctrl->cntlid) {
3164 dev_err(ctrl->device,
16cc33b2
KB
3165 "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
3166 ctrl->cntlid, dev_name(tmp->device),
3167 subsys->subnqn);
1b1031ca
CH
3168 return false;
3169 }
b837b283 3170
92decf11 3171 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
5ab25a32 3172 nvme_discovery_ctrl(ctrl))
1b1031ca
CH
3173 continue;
3174
3175 dev_err(ctrl->device,
3176 "Subsystem does not support multiple controllers\n");
3177 return false;
b837b283 3178 }
b837b283 3179
1b1031ca 3180 return true;
b837b283
IR
3181}
3182
ab9e00cc
CH
3183static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3184{
3185 struct nvme_subsystem *subsys, *found;
3186 int ret;
3187
3188 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
3189 if (!subsys)
3190 return -ENOMEM;
733e4b69
KB
3191
3192 subsys->instance = -1;
ab9e00cc
CH
3193 mutex_init(&subsys->lock);
3194 kref_init(&subsys->ref);
3195 INIT_LIST_HEAD(&subsys->ctrls);
ed754e5d 3196 INIT_LIST_HEAD(&subsys->nsheads);
ab9e00cc
CH
3197 nvme_init_subnqn(subsys, ctrl, id);
3198 memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
3199 memcpy(subsys->model, id->mn, sizeof(subsys->model));
ab9e00cc
CH
3200 subsys->vendor_id = le16_to_cpu(id->vid);
3201 subsys->cmic = id->cmic;
f46d2734 3202 subsys->awupf = le16_to_cpu(id->awupf);
954ae166
HR
3203
3204 /* Versions prior to 1.4 don't necessarily report a valid type */
3205 if (id->cntrltype == NVME_CTRL_DISC ||
3206 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
3207 subsys->subtype = NVME_NQN_DISC;
3208 else
3209 subsys->subtype = NVME_NQN_NVME;
3210
20e8b689
HR
3211 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
3212 dev_err(ctrl->device,
3213 "Subsystem %s is not a discovery controller",
3214 subsys->subnqn);
3215 kfree(subsys);
3216 return -EINVAL;
3217 }
e3d34794 3218 nvme_mpath_default_iopolicy(subsys);
ab9e00cc 3219
ab21f3d9 3220 subsys->dev.class = &nvme_subsys_class;
ab9e00cc 3221 subsys->dev.release = nvme_release_subsystem;
1e496938 3222 subsys->dev.groups = nvme_subsys_attrs_groups;
733e4b69 3223 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
ab9e00cc
CH
3224 device_initialize(&subsys->dev);
3225
3226 mutex_lock(&nvme_subsystems_lock);
3227 found = __nvme_find_get_subsystem(subsys->subnqn);
3228 if (found) {
e654dfd3 3229 put_device(&subsys->dev);
ab9e00cc 3230 subsys = found;
32fd90c4 3231
1b1031ca 3232 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
ab9e00cc 3233 ret = -EINVAL;
32fd90c4 3234 goto out_put_subsystem;
ab9e00cc 3235 }
ab9e00cc
CH
3236 } else {
3237 ret = device_add(&subsys->dev);
3238 if (ret) {
3239 dev_err(ctrl->device,
3240 "failed to register subsystem device.\n");
8c36e66f 3241 put_device(&subsys->dev);
ab9e00cc
CH
3242 goto out_unlock;
3243 }
ed754e5d 3244 ida_init(&subsys->ns_ida);
ab9e00cc
CH
3245 list_add_tail(&subsys->entry, &nvme_subsystems);
3246 }
3247
bc4f6e06
DC
3248 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
3249 dev_name(ctrl->device));
3250 if (ret) {
ab9e00cc
CH
3251 dev_err(ctrl->device,
3252 "failed to create sysfs link from subsystem.\n");
32fd90c4 3253 goto out_put_subsystem;
ab9e00cc
CH
3254 }
3255
733e4b69
KB
3256 if (!found)
3257 subsys->instance = ctrl->instance;
32fd90c4 3258 ctrl->subsys = subsys;
ab9e00cc 3259 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
32fd90c4 3260 mutex_unlock(&nvme_subsystems_lock);
ab9e00cc
CH
3261 return 0;
3262
32fd90c4
CH
3263out_put_subsystem:
3264 nvme_put_subsystem(subsys);
ab9e00cc
CH
3265out_unlock:
3266 mutex_unlock(&nvme_subsystems_lock);
ab9e00cc 3267 return ret;
180de007
CH
3268}
3269
d4f8359e
CH
3270static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page,
3271 u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi)
c627c487
KB
3272{
3273 struct nvme_command c = { };
71fb90eb 3274 u32 dwlen = nvme_bytes_to_numd(size);
70da6094
MB
3275
3276 c.get_log_page.opcode = nvme_admin_get_log_page;
0e98719b 3277 c.get_log_page.nsid = cpu_to_le32(nsid);
70da6094 3278 c.get_log_page.lid = log_page;
0e98719b 3279 c.get_log_page.lsp = lsp;
70da6094
MB
3280 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
3281 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
7ec6074f
MB
3282 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
3283 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
be93e87e 3284 c.get_log_page.csi = csi;
d4f8359e 3285 c.get_log_page.lsi = cpu_to_le16(lsi);
c627c487
KB
3286
3287 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
3288}
3289
d4f8359e
CH
3290int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
3291 void *log, size_t size, u64 offset)
3292{
3293 return nvme_get_log_lsi(ctrl, nsid, log_page, lsp, csi, log, size,
3294 offset, 0);
3295}
3296
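/*
 * Illustrative encoding: a 4096-byte read is 1024 dwords, stored 0's
 * based as numd = 1023, so numdl = 0x03ff and numdu = 0; a 64-bit byte
 * offset is likewise split across lpol and lpou.
 */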
be93e87e
KB
3297static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
3298 struct nvme_effects_log **log)
84fef62d 3299{
ac32057a 3300 struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
84fef62d
KB
3301 int ret;
3302
be93e87e
KB
3303 if (cel)
3304 goto out;
84fef62d 3305
be93e87e
KB
3306 cel = kzalloc(sizeof(*cel), GFP_KERNEL);
3307 if (!cel)
3308 return -ENOMEM;
84fef62d 3309
46d2613e 3310 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
f6224b86 3311 cel, sizeof(*cel), 0);
84fef62d 3312 if (ret) {
be93e87e
KB
3313 kfree(cel);
3314 return ret;
84fef62d 3315 }
be93e87e 3316
ac32057a
KN
3317 old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
3318 if (xa_is_err(old)) {
3319 kfree(cel);
3320 return xa_err(old);
3321 }
be93e87e 3322out:
f6224b86 3323 *log = cel;
be93e87e 3324 return 0;
180de007
CH
3325}

static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
{
	u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;

	if (check_shl_overflow(1U, units + page_shift - 9, &val))
		return UINT_MAX;
	return val;
}
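
/*
 * Worked example for the conversion above (illustrative values): with
 * CAP.MPSMIN = 0 the minimum memory page size is 4 KiB, so page_shift is
 * 12. An MDTS or WZSL value of 5 then maps to 1 << (5 + 12 - 9) = 256
 * sectors of 512 bytes, i.e. a 128 KiB transfer limit; values large
 * enough to overflow the shift are clamped to UINT_MAX.
 */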

static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
{
	struct nvme_command c = { };
	struct nvme_id_ctrl_nvm *id;
	int ret;

	/*
	 * Even though the NVMe spec explicitly states that MDTS is not
	 * applicable to Write Zeroes, we are cautious and limit the size to
	 * the controller's max_hw_sectors value, which is based on the MDTS
	 * field and possibly other limiting factors.
	 */
	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
	else
		ctrl->max_zeroes_sectors = 0;

	if (ctrl->subsys->subtype != NVME_NQN_NVME ||
	    !nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) ||
	    test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
		return 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CS_CTRL;
	c.identify.csi = NVME_CSI_NVM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (ret)
		goto free_data;

	ctrl->dmrl = id->dmrl;
	ctrl->dmrsl = le32_to_cpu(id->dmrsl);
	if (id->wzsl)
		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);

free_data:
	if (ret > 0)
		set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
	kfree(id);
	return ret;
}

static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
		u8 csi, struct nvme_effects_log **log)
{
	struct nvme_effects_log *effects, *old;

	effects = kzalloc(sizeof(*effects), GFP_KERNEL);
	if (!effects)
		return -ENOMEM;

	old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
	if (xa_is_err(old)) {
		kfree(effects);
		return xa_err(old);
	}

	*log = effects;
	return 0;
}

static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log *log = ctrl->effects;

	log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
						NVME_CMD_EFFECTS_NCC |
						NVME_CMD_EFFECTS_CSE_MASK);
	log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
						NVME_CMD_EFFECTS_CSE_MASK);

	/*
	 * The spec says the result of a security receive command depends on
	 * the previous security send command. As such, many vendors log this
	 * command as one to be submitted only when no other commands to the
	 * same namespace are outstanding. The intention is to tell the host
	 * to prevent mixing security send and receive.
	 *
	 * This driver can only enforce such exclusive access against I/O
	 * queues, though. We are not readily able to enforce such a rule for
	 * two commands to the admin queue, which is the only queue that
	 * matters for this command.
	 *
	 * Rather than blindly freezing the I/O queues for this effect that
	 * doesn't even apply to I/O, mask it off.
	 */
	log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK);

	log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
}

static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int ret = 0;

	if (ctrl->effects)
		return 0;

	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
		if (ret < 0)
			return ret;
	}

	if (!ctrl->effects) {
		ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
		if (ret < 0)
			return ret;
	}

	nvme_init_known_nvm_effects(ctrl);
	return 0;
}

static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	/*
	 * In fabrics we need to verify the cntlid matches the
	 * admin connect
	 */
	if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
		dev_err(ctrl->device,
			"Mismatching cntlid: Connect %u vs Identify %u, rejecting\n",
			ctrl->cntlid, le16_to_cpu(id->cntlid));
		return -EINVAL;
	}

	if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
		dev_err(ctrl->device,
			"keep-alive support is mandatory for fabrics\n");
		return -EINVAL;
	}

	if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) {
		dev_err(ctrl->device,
			"I/O queue command capsule supported size %d < 4\n",
			ctrl->ioccsz);
		return -EINVAL;
	}

	if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) {
		dev_err(ctrl->device,
			"I/O queue response capsule supported size %d < 1\n",
			ctrl->iorcsz);
		return -EINVAL;
	}

	if (!ctrl->maxcmd) {
		dev_warn(ctrl->device,
			"Firmware bug: maximum outstanding commands is 0\n");
		ctrl->maxcmd = ctrl->sqsize + 1;
	}

	return 0;
}

static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct queue_limits lim;
	struct nvme_id_ctrl *id;
	u32 max_hw_sectors;
	bool prev_apst_enabled;
	int ret;

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

	if (!ctrl->identified) {
		unsigned int i;

		/*
		 * Check for quirks. Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset. As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

		ret = nvme_init_effects(ctrl, id);
		if (ret)
			goto out_free;
	}

	if (le16_to_cpu(id->awupf) != ctrl->subsys->awupf) {
		dev_err_ratelimited(ctrl->device,
			"inconsistent AWUPF, controller not added (%u/%u).\n",
			le16_to_cpu(id->awupf), ctrl->subsys->awupf);
		ret = -EINVAL;
		goto out_free;
	}

	memcpy(ctrl->subsys->firmware_rev, id->fr,
	       sizeof(ctrl->subsys->firmware_rev));

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
	ctrl->crdt[2] = le16_to_cpu(id->crdt3);

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->oncs = le16_to_cpu(id->oncs);
	ctrl->mtfa = le16_to_cpu(id->mtfa);
	ctrl->oaes = le32_to_cpu(id->oaes);
	ctrl->wctemp = le16_to_cpu(id->wctemp);
	ctrl->cctemp = le16_to_cpu(id->cctemp);

	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	if (id->mdts)
		max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	lim = queue_limits_start_update(ctrl->admin_q);
	nvme_set_ctrl_limits(ctrl, &lim);
	ret = queue_limits_commit_update(ctrl->admin_q, &lim);
	if (ret)
		goto out_free;

	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
	ctrl->ctratt = le32_to_cpu(id->ctratt);

	ctrl->cntrltype = id->cntrltype;
	ctrl->dctype = id->dctype;

	if (id->rtd3e) {
		/* us -> s */
		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
			dev_info(ctrl->device,
				 "D3 entry latency set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		ret = nvme_check_ctrl_fabric_info(ctrl, id);
		if (ret)
			goto out_free;
	} else {
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	ret = nvme_mpath_init_identify(ctrl, id);
	if (ret < 0)
		goto out_free;

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);
out_free:
	kfree(id);
	return ret;
}
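
/*
 * Worked example for the RTD3E clamp above (illustrative values): a
 * controller reporting RTD3E = 8,000,000 us asks for 8 seconds of D3
 * entry latency; clamp_t(8, shutdown_timeout = 5, 60) keeps it at 8 and
 * the new value is logged. Anything above 60 s is capped, and anything
 * below the shutdown_timeout module parameter is raised to it.
 */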

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure. This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
{
	int ret;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);

	ret = nvme_init_identify(ctrl);
	if (ret)
		return ret;

	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_host_options(ctrl);
	if (ret < 0)
		return ret;

	nvme_configure_opal(ctrl, was_suspended);

	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
		/*
		 * Do not return errors unless we are in a controller reset,
		 * the controller works perfectly fine without hwmon.
		 */
		ret = nvme_hwmon_init(ctrl);
		if (ret == -EINTR)
			return ret;
	}

	clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
	ctrl->identified = true;

	nvme_start_keep_alive(ctrl);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	switch (nvme_ctrl_state(ctrl)) {
	case NVME_CTRL_LIVE:
		break;
	default:
		return -EWOULDBLOCK;
	}

	nvme_get_ctrl(ctrl);
	if (!try_module_get(ctrl->ops->module)) {
		nvme_put_ctrl(ctrl);
		return -EINVAL;
	}

	file->private_data = ctrl;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	module_put(ctrl->ops->module);
	nvme_put_ctrl(ctrl);
	return 0;
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_dev_uring_cmd,
};

static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&ctrl->subsys->lock);

	list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
		/*
		 * Private namespaces can share NSIDs under some conditions.
		 * In that case we can't use the same ns_head for namespaces
		 * with the same NSID.
		 */
		if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
			continue;
		if (nvme_tryget_ns_head(h))
			return h;
	}

	return NULL;
}

static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_ids *ids)
{
	bool has_uuid = !uuid_is_null(&ids->uuid);
	bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
	bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
			return -EINVAL;
		if (has_nguid &&
		    memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
			return -EINVAL;
		if (has_eui64 &&
		    memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
			return -EINVAL;
	}

	return 0;
}

static void nvme_cdev_rel(struct device *dev)
{
	ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
}

void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
{
	cdev_device_del(cdev, cdev_device);
	put_device(cdev_device);
}

int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner)
{
	int minor, ret;

	minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
	if (minor < 0)
		return minor;
	cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
	cdev_device->class = &nvme_ns_chr_class;
	cdev_device->release = nvme_cdev_rel;
	device_initialize(cdev_device);
	cdev_init(cdev, fops);
	cdev->owner = owner;
	ret = cdev_device_add(cdev, cdev_device);
	if (ret)
		put_device(cdev_device);

	return ret;
}

static int nvme_ns_chr_open(struct inode *inode, struct file *file)
{
	return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
}

static int nvme_ns_chr_release(struct inode *inode, struct file *file)
{
	nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
	return 0;
}

static const struct file_operations nvme_ns_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_chr_open,
	.release	= nvme_ns_chr_release,
	.unlocked_ioctl	= nvme_ns_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_chr_uring_cmd,
	.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
};

static int nvme_add_ns_cdev(struct nvme_ns *ns)
{
	int ret;

	ns->cdev_device.parent = ns->ctrl->device;
	ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
			   ns->ctrl->instance, ns->head->instance);
	if (ret)
		return ret;

	return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
			     ns->ctrl->ops->module);
}

static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_ns_head *head;
	size_t size = sizeof(*head);
	int ret = -ENOMEM;

#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
	if (!head)
		goto out;
	ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = info->nsid;
	head->ids = info->ids;
	head->shared = info->is_shared;
	head->rotational = info->is_rotational;
	ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
	ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
	kref_init(&head->ref);

	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ERR_PTR(ret);
}

static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
		struct nvme_ns_ids *ids)
{
	struct nvme_subsystem *s;
	int ret = 0;

	/*
	 * Note that this check is racy as we try to avoid holding the global
	 * lock over the whole ns_head creation. But it is only intended as
	 * a sanity check anyway.
	 */
	mutex_lock(&nvme_subsystems_lock);
	list_for_each_entry(s, &nvme_subsystems, entry) {
		if (s == this)
			continue;
		mutex_lock(&s->lock);
		ret = nvme_subsys_check_duplicate_ids(s, ids);
		mutex_unlock(&s->lock);
		if (ret)
			break;
	}
	mutex_unlock(&nvme_subsystems_lock);

	return ret;
}

static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_ns_head *head = NULL;
	int ret;

	ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
	if (ret) {
		/*
		 * We've found two different namespaces on two different
		 * subsystems that report the same ID. This is pretty nasty
		 * for anything that actually requires unique device
		 * identification. In the kernel we need this for multipathing,
		 * and in user space the /dev/disk/by-id/ links rely on it.
		 *
		 * If the device also claims to be multi-path capable, back off
		 * here now and refuse to probe the second device, as this is a
		 * recipe for data corruption. If not, this is probably a
		 * cheap consumer device if on the PCIe bus, so let the user
		 * proceed and use the shiny toy, but warn that with changing
		 * probing order (which due to our async probing could just be
		 * a device taking longer to start up) the other device could
		 * show up at any time.
		 */
		nvme_print_device_info(ctrl);
		if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
		    ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
		     info->is_shared)) {
			dev_err(ctrl->device,
				"ignoring nsid %d because of duplicate IDs\n",
				info->nsid);
			return ret;
		}

		dev_err(ctrl->device,
			"clearing duplicate IDs for nsid %d\n", info->nsid);
		dev_err(ctrl->device,
			"use of /dev/disk/by-id/ may cause data corruption\n");
		memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
		memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
		memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
		ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
	}

	mutex_lock(&ctrl->subsys->lock);
	head = nvme_find_ns_head(ctrl, info->nsid);
	if (!head) {
		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
		if (ret) {
			dev_err(ctrl->device,
				"duplicate IDs in subsystem for nsid %d\n",
				info->nsid);
			goto out_unlock;
		}
		head = nvme_alloc_ns_head(ctrl, info);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
	} else {
		ret = -EINVAL;
		if ((!info->is_shared || !head->shared) &&
		    !list_empty(&head->list)) {
			dev_err(ctrl->device,
				"Duplicate unshared namespace %d\n",
				info->nsid);
			goto out_put_ns_head;
		}
		if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
				info->nsid);
			goto out_put_ns_head;
		}

		if (!multipath) {
			dev_warn(ctrl->device,
				"Found shared namespace %d, but multipathing not supported.\n",
				info->nsid);
			dev_warn_once(ctrl->device,
				"Shared namespace support requires nvme_core.multipath=Y.\n");
		}
	}

	list_add_tail_rcu(&ns->siblings, &head->list);
	ns->head = head;
	mutex_unlock(&ctrl->subsys->lock);

#ifdef CONFIG_NVME_MULTIPATH
	cancel_delayed_work(&head->remove_work);
#endif
	return 0;

out_put_ns_head:
	nvme_put_ns_head(head);
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu)) {
		if (ns->head->ns_id == nsid) {
			if (!nvme_get_ns(ns))
				continue;
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, "NVME_TARGET_PASSTHRU");

/*
 * Add the namespace to the controller list while keeping the list ordered.
 */
static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
{
	struct nvme_ns *tmp;

	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
		if (tmp->head->ns_id < ns->head->ns_id) {
			list_add_rcu(&ns->list, &tmp->list);
			return;
		}
	}
	list_add(&ns->list, &ns->ctrl->namespaces);
}
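
/*
 * Worked example for the sorted insert above (illustrative NSIDs): with
 * namespaces 1, 2 and 5 already on the list, inserting NSID 3 walks
 * backwards from 5, stops at 2 (the first entry with a smaller ns_id)
 * and links 3 right after it, keeping the list sorted. Only when no
 * smaller entry exists does the new namespace go to the front.
 */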

static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
	struct queue_limits lim = { };
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = ctrl->numa_node;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	if (ctrl->opts && ctrl->opts->data_digest)
		lim.features |= BLK_FEAT_STABLE_WRITES;
	if (ctrl->ops->supports_pci_p2pdma &&
	    ctrl->ops->supports_pci_p2pdma(ctrl))
		lim.features |= BLK_FEAT_PCI_P2PDMA;

	disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns);
	if (IS_ERR(disk))
		goto out_free_ns;
	disk->fops = &nvme_bdev_ops;
	disk->private_data = ns;

	ns->disk = disk;
	ns->queue = disk->queue;
	ns->ctrl = ctrl;
	kref_init(&ns->kref);

	if (nvme_init_ns_head(ns, info))
		goto out_cleanup_disk;

	/*
	 * If multipathing is enabled, the device name for all disks and not
	 * just those that represent shared namespaces needs to be based on the
	 * subsystem instance. Using the controller instance for private
	 * namespaces could lead to naming collisions between shared and private
	 * namespaces if they don't use a common numbering scheme.
	 *
	 * If multipathing is not enabled, disk names must use the controller
	 * instance as shared namespaces will show up as multiple block
	 * devices.
	 */
	if (nvme_ns_head_multipath(ns->head)) {
		sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
			ctrl->instance, ns->head->instance);
		disk->flags |= GENHD_FL_HIDDEN;
	} else if (multipath) {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
			ns->head->instance);
	} else {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
			ns->head->instance);
	}

	if (nvme_update_ns_info(ns, info))
		goto out_unlink_ns;

	mutex_lock(&ctrl->namespaces_lock);
	/*
	 * Ensure that no namespaces are added to the ctrl list after the queues
	 * are frozen, thereby avoiding a deadlock between scan and reset.
	 */
	if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
		mutex_unlock(&ctrl->namespaces_lock);
		goto out_unlink_ns;
	}
	nvme_ns_add_to_ctrl_list(ns);
	mutex_unlock(&ctrl->namespaces_lock);
	synchronize_srcu(&ctrl->srcu);
	nvme_get_ctrl(ctrl);

	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
		goto out_cleanup_ns_from_list;

	if (!nvme_ns_head_multipath(ns->head))
		nvme_add_ns_cdev(ns);

	nvme_mpath_add_disk(ns, info->anagrpid);
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);

	/*
	 * Set ns->disk->device->driver_data to ns so we can access
	 * ns->head->passthru_err_log_enabled in
	 * nvme_io_passthru_err_log_enabled_[store | show]().
	 */
	dev_set_drvdata(disk_to_dev(ns->disk), ns);

	return;

 out_cleanup_ns_from_list:
	nvme_put_ctrl(ctrl);
	mutex_lock(&ctrl->namespaces_lock);
	list_del_rcu(&ns->list);
	mutex_unlock(&ctrl->namespaces_lock);
	synchronize_srcu(&ctrl->srcu);
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
	mutex_unlock(&ctrl->subsys->lock);
	nvme_put_ns_head(ns->head);
 out_cleanup_disk:
	put_disk(disk);
 out_free_ns:
	kfree(ns);
}
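
/*
 * Naming example for the three branches above (illustrative instance
 * numbers): with multipathing enabled, a shared namespace with head
 * instance 2 in subsystem 0 reached through controller 1 gets the hidden
 * per-path node nvme0c1n2, while I/O goes through the subsystem-level
 * nvme0n2; with multipathing disabled the same namespace appears once per
 * controller, e.g. nvme1n2.
 */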

static void nvme_ns_remove(struct nvme_ns *ns)
{
	bool last_path = false;

	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	clear_bit(NVME_NS_READY, &ns->flags);
	set_capacity(ns->disk, 0);
	nvme_fault_inject_fini(&ns->fault_inject);

	/*
	 * Ensure that !NVME_NS_READY is seen by other threads to prevent
	 * this ns going back into current_path.
	 */
	synchronize_srcu(&ns->head->srcu);

	/* wait for concurrent submissions */
	if (nvme_mpath_clear_current_path(ns))
		synchronize_srcu(&ns->head->srcu);

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list)) {
		if (!nvme_mpath_queue_if_no_path(ns->head))
			list_del_init(&ns->head->entry);
		last_path = true;
	}
	mutex_unlock(&ns->ctrl->subsys->lock);

	/* guarantee not available in head->list */
	synchronize_srcu(&ns->head->srcu);

	if (!nvme_ns_head_multipath(ns->head))
		nvme_cdev_del(&ns->cdev, &ns->cdev_device);

	nvme_mpath_remove_sysfs_link(ns);

	del_gendisk(ns->disk);

	mutex_lock(&ns->ctrl->namespaces_lock);
	list_del_rcu(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_lock);
	synchronize_srcu(&ns->ctrl->srcu);

	if (last_path)
		nvme_mpath_remove_disk(ns->head);
	nvme_put_ns(ns);
}

static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);

	if (ns) {
		nvme_ns_remove(ns);
		nvme_put_ns(ns);
	}
}

static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{
	int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR;

	if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
		dev_err(ns->ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
		goto out;
	}

	ret = nvme_update_ns_info(ns, info);
out:
	/*
	 * Only remove the namespace if we got a fatal error back from the
	 * device, otherwise ignore the error and just move on.
	 *
	 * TODO: we should probably schedule a delayed retry here.
	 */
	if (ret > 0 && (ret & NVME_STATUS_DNR))
		nvme_ns_remove(ns);
}

static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns_info info = { .nsid = nsid };
	struct nvme_ns *ns;
	int ret = 1;

	if (nvme_identify_ns_descs(ctrl, &info))
		return;

	if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
		dev_warn(ctrl->device,
			"command set not reported for nsid: %d\n", nsid);
		return;
	}

	/*
	 * If available, try to use the Command Set Independent Identify
	 * Namespace data structure to find all the generic information that
	 * is needed to set up a namespace. If not, fall back to the legacy
	 * version.
	 */
	if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
	    (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS) ||
	    ctrl->vs >= NVME_VS(2, 0, 0))
		ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
	if (ret > 0)
		ret = nvme_ns_info_from_identify(ctrl, &info);

	if (info.is_removed)
		nvme_ns_remove_by_nsid(ctrl, nsid);

	/*
	 * Ignore the namespace if it is not ready. We will get an AEN once it
	 * becomes ready and restart the scan.
	 */
	if (ret || !info.is_ready)
		return;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		nvme_validate_ns(ns, &info);
		nvme_put_ns(ns);
	} else {
		nvme_alloc_ns(ctrl, &info);
	}
}

/**
 * struct async_scan_info - keeps track of controller & NSIDs to scan
 * @ctrl:	Controller on which namespaces are being scanned
 * @next_nsid:	Index of next NSID to scan in ns_list
 * @ns_list:	Pointer to list of NSIDs to scan
 *
 * Note: There is a single async_scan_info structure shared by all instances
 * of nvme_scan_ns_async() scanning a given controller, so the atomic
 * operations on next_nsid are critical to ensure each instance scans a unique
 * NSID.
 */
struct async_scan_info {
	struct nvme_ctrl *ctrl;
	atomic_t next_nsid;
	__le32 *ns_list;
};

static void nvme_scan_ns_async(void *data, async_cookie_t cookie)
{
	struct async_scan_info *scan_info = data;
	int idx;
	u32 nsid;

	idx = (u32)atomic_fetch_inc(&scan_info->next_nsid);
	nsid = le32_to_cpu(scan_info->ns_list[idx]);

	nvme_scan_ns(scan_info->ctrl, nsid);
}
4347
47b0e50a
SB
4348static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
4349 unsigned nsid)
4350{
4351 struct nvme_ns *ns, *next;
6f8e0d78 4352 LIST_HEAD(rm_list);
47b0e50a 4353
be647e2c 4354 mutex_lock(&ctrl->namespaces_lock);
47b0e50a 4355 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
ff0ffe5b
KB
4356 if (ns->head->ns_id > nsid) {
4357 list_del_rcu(&ns->list);
4358 synchronize_srcu(&ctrl->srcu);
4359 list_add_tail_rcu(&ns->list, &rm_list);
4360 }
47b0e50a 4361 }
be647e2c 4362 mutex_unlock(&ctrl->namespaces_lock);
6f8e0d78
JW
4363
4364 list_for_each_entry_safe(ns, next, &rm_list, list)
4365 nvme_ns_remove(ns);
47b0e50a
SB
4366}

static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{
	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
	__le32 *ns_list;
	u32 prev = 0;
	int ret = 0, i;
	ASYNC_DOMAIN(domain);
	struct async_scan_info scan_info;

	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	scan_info.ctrl = ctrl;
	scan_info.ns_list = ns_list;
	for (;;) {
		struct nvme_command cmd = {
			.identify.opcode	= nvme_admin_identify,
			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
			.identify.nsid		= cpu_to_le32(prev),
		};

		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
					   NVME_IDENTIFY_DATA_SIZE);
		if (ret) {
			dev_warn(ctrl->device,
				"Identify NS List failed (status=0x%x)\n", ret);
			goto free;
		}

		atomic_set(&scan_info.next_nsid, 0);
		for (i = 0; i < nr_entries; i++) {
			u32 nsid = le32_to_cpu(ns_list[i]);

			if (!nsid)	/* end of the list? */
				goto out;
			async_schedule_domain(nvme_scan_ns_async, &scan_info,
					      &domain);
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
		}
		async_synchronize_full_domain(&domain);
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	async_synchronize_full_domain(&domain);
	kfree(ns_list);
	return ret;
}
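
/*
 * Worked example of the paging loop above (illustrative NSIDs): Identify
 * with CNS 0x02 returns up to 1024 active NSIDs greater than the NSID in
 * CDW1, in ascending order. For active namespaces {1, 2, 5}, the first
 * pass (prev = 0) returns [1, 2, 5, 0, ...]; NSIDs 3 and 4 fall into the
 * ++prev gap and are removed if the driver still has them, and the zero
 * entry terminates the scan.
 */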

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u32 nn, i;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);

	for (i = 1; i <= nn; i++)
		nvme_scan_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
	int error;

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
		return;

	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	int ret;

	/* No tagset on a live ctrl means IO queues could not be created */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
		return;

	/*
	 * Identify controller limits can change at controller reset due to a
	 * new firmware download; even though it is not common, we cannot
	 * ignore such a scenario. The controller's non-MDTS limits are
	 * reported in units of logical blocks, which depend on the format of
	 * the attached namespace. Hence re-read the limits at the time of ns
	 * allocation.
	 */
	ret = nvme_init_non_mdts_limits(ctrl);
	if (ret < 0) {
		dev_warn(ctrl->device,
			"reading non-mdts-limits failed: %d\n", ret);
		return;
	}

	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
		nvme_clear_changed_ns_log(ctrl);
	}

	mutex_lock(&ctrl->scan_lock);
	if (!nvme_id_cns_ok(ctrl, NVME_ID_CNS_NS_ACTIVE_LIST)) {
		nvme_scan_ns_sequential(ctrl);
	} else {
		/*
		 * Fall back to sequential scan if DNR is set to handle broken
		 * devices which should support Identify NS List (as per the VS
		 * they report) but don't actually support it.
		 */
		ret = nvme_scan_ns_list(ctrl);
		if (ret > 0 && ret & NVME_STATUS_DNR)
			nvme_scan_ns_sequential(ctrl);
	}
	mutex_unlock(&ctrl->scan_lock);

	/* Requeue if we have missed AENs */
	if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
		nvme_queue_scan(ctrl);
#ifdef CONFIG_NVME_MULTIPATH
	else if (ctrl->ana_log_buf)
		/* Re-read the ANA log page to not miss updates */
		queue_work(nvme_wq, &ctrl->ana_work);
#endif
}

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/*
	 * Unquiesce io queues so any pending IO won't hang, especially
	 * those submitted from scan work
	 */
	nvme_unquiesce_io_queues(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
		nvme_mark_namespaces_dead(ctrl);

	/* this is a no-op when called from the controller reset handler */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);

	mutex_lock(&ctrl->namespaces_lock);
	list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
	mutex_unlock(&ctrl->namespaces_lock);
	synchronize_srcu(&ctrl->srcu);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
				opts->host_iface ?: "none");
	}
	return ret;
}
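
/*
 * Example environment as a udev rule would see it for a fabrics (TCP)
 * controller, with illustrative address values: NVME_TRTYPE=tcp
 * NVME_TRADDR=192.168.0.9 NVME_TRSVCID=4420 NVME_HOST_TRADDR=none
 * NVME_HOST_IFACE=none. PCIe controllers have no fabrics options
 * (ctrl->opts is NULL), so only NVME_TRTYPE is added.
 */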

static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
{
	char *envp[2] = { envdata, NULL };

	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
}

static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}
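
/*
 * Illustrative value (assumed, not taken from a real trace): a discovery
 * log change notice latched in ctrl->aen_result as 0x70f002 would be
 * emitted to userspace as the uevent variable "NVME_AEN=0x70f002", which
 * udev rules or monitoring tools can match to trigger a rediscovery.
 */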

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);

	/*
	 * The transport drivers must guarantee AER submission here is safe by
	 * flushing ctrl async_event_work after changing the controller state
	 * from LIVE and before freeing the admin queue.
	 */
	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
		ctrl->ops->submit_async_event(ctrl);
}

static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;
	u8 next_fw_slot, cur_fw_slot;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
			log, sizeof(*log), 0)) {
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
		goto out_free_log;
	}

	cur_fw_slot = log->afi & 0x7;
	next_fw_slot = (log->afi & 0x70) >> 4;
	if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) {
		dev_info(ctrl->device,
			 "Firmware is activated after next Controller Level Reset\n");
		goto out_free_log;
	}

	memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
		sizeof(ctrl->subsys->firmware_rev));

out_free_log:
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	nvme_auth_stop(ctrl);

	if (ctrl->mtfa)
		fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies + secs_to_jiffies(admin_timeout);

	nvme_quiesce_io_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_try_sched_reset(ctrl);
			return;
		}
		msleep(100);
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
	    !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
		return;

	nvme_unquiesce_io_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);

	queue_work(nvme_wq, &ctrl->async_event_work);
}
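
/*
 * Timeout arithmetic above, with illustrative values: MTFA is reported in
 * 100 ms units, so a controller advertising mtfa = 20 gets a 2000 ms
 * window for the paused-processing (CSTS.PP) poll loop, while a
 * controller reporting no MTFA falls back to the admin_timeout module
 * parameter (60 s by default).
 */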

static u32 nvme_aer_type(u32 result)
{
	return result & 0x7;
}

static u32 nvme_aer_subtype(u32 result)
{
	return (result & 0xff00) >> 8;
}
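
/*
 * Layout of the AER completion result dword decoded above (illustrative
 * example): bits 2:0 hold the event type, bits 15:8 the event
 * information, and bits 23:16 the associated log page. A namespace
 * changed notice thus arrives as result = 0x040002: type = 0x2
 * (NVME_AER_NOTICE), subtype = 0x00 (NVME_AER_NOTICE_NS_CHANGED), log
 * page = 0x04 (changed namespace list).
 */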

static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = nvme_aer_subtype(result);
	bool requeue = true;

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
			requeue = false;
			queue_work(nvme_wq, &ctrl->fw_act_work);
		}
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
	return requeue;
}

static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
{
	dev_warn(ctrl->device,
		"resetting controller due to persistent internal error\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = nvme_aer_type(result);
	u32 aer_subtype = nvme_aer_subtype(result);
	bool requeue = true;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	trace_nvme_async_event(ctrl, result);
	switch (aer_type) {
	case NVME_AER_NOTICE:
		requeue = nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
		/*
		 * For a persistent internal error, don't run async_event_work
		 * to submit a new AER. The controller reset will do it.
		 */
		if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
			nvme_handle_aer_persistent_error(ctrl);
			return;
		}
		fallthrough;
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		ctrl->aen_result = result;
		break;
	default:
		break;
	}

	if (requeue)
		queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size)
{
	struct queue_limits lim = {};
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	if (ctrl->ops->flags & NVME_F_FABRICS)
		/* Reserved for fabric connect and keep alive */
		set->reserved_tags = 2;
	set->numa_node = ctrl->numa_node;
	if (ctrl->ops->flags & NVME_F_BLOCKING)
		set->flags |= BLK_MQ_F_BLOCKING;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;
	set->nr_hw_queues = 1;
	set->timeout = NVME_ADMIN_TIMEOUT;
	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
	if (IS_ERR(ctrl->admin_q)) {
		ret = PTR_ERR(ctrl->admin_q);
		goto out_free_tagset;
	}

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL);
		if (IS_ERR(ctrl->fabrics_q)) {
			ret = PTR_ERR(ctrl->fabrics_q);
			goto out_cleanup_admin_q;
		}
	}

	ctrl->admin_tagset = set;
	return 0;

out_cleanup_admin_q:
	blk_mq_destroy_queue(ctrl->admin_q);
	blk_put_queue(ctrl->admin_q);
out_free_tagset:
	blk_mq_free_tag_set(set);
	ctrl->admin_q = NULL;
	ctrl->fabrics_q = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);

void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
{
	/*
	 * As we're about to destroy the queue and free the tagset,
	 * we can not have keep-alive work running.
	 */
	nvme_stop_keep_alive(ctrl);
	blk_mq_destroy_queue(ctrl->admin_q);
	blk_put_queue(ctrl->admin_q);
	if (ctrl->ops->flags & NVME_F_FABRICS) {
		blk_mq_destroy_queue(ctrl->fabrics_q);
		blk_put_queue(ctrl->fabrics_q);
	}
	blk_mq_free_tag_set(ctrl->admin_tagset);
}
EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);

int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
	/*
	 * Some Apple controllers require tags to be unique across admin and
	 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
	 */
	if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
		set->reserved_tags = NVME_AQ_DEPTH;
	else if (ctrl->ops->flags & NVME_F_FABRICS)
		/* Reserved for fabric connect */
		set->reserved_tags = 1;
	set->numa_node = ctrl->numa_node;
	if (ctrl->ops->flags & NVME_F_BLOCKING)
		set->flags |= BLK_MQ_F_BLOCKING;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;
	set->nr_hw_queues = ctrl->queue_count - 1;
	set->timeout = NVME_IO_TIMEOUT;
	set->nr_maps = nr_maps;
	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		struct queue_limits lim = {
			.features	= BLK_FEAT_SKIP_TAGSET_QUIESCE,
		};

		ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL);
		if (IS_ERR(ctrl->connect_q)) {
			ret = PTR_ERR(ctrl->connect_q);
			goto out_free_tag_set;
		}
	}

	ctrl->tagset = set;
	return 0;

out_free_tag_set:
	blk_mq_free_tag_set(set);
	ctrl->connect_q = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);

void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
{
	if (ctrl->ops->flags & NVME_F_FABRICS) {
		blk_mq_destroy_queue(ctrl->connect_q);
		blk_put_queue(ctrl->connect_q);
	}
	blk_mq_free_tag_set(ctrl->tagset);
}
EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_auth_stop(ctrl);
	nvme_stop_failfast_work(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
	if (ctrl->ops->stop_ctrl)
		ctrl->ops->stop_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_enable_aen(ctrl);

	/*
	 * Persistent discovery controllers need to send an indication to
	 * userspace to re-read the discovery log page, to learn about
	 * possible changes that were missed. We identify persistent
	 * discovery controllers by checking that they started once before,
	 * hence are reconnecting back.
	 */
	if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
	    nvme_discovery_ctrl(ctrl))
		nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_unquiesce_io_queues(ctrl);
		nvme_mpath_update(ctrl);
	}

	nvme_change_uevent(ctrl, "NVME_EVENT=connected");
	set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_stop_keep_alive(ctrl);
	nvme_hwmon_exit(ctrl);
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
	nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_cels(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log *cel;
	unsigned long i;

	xa_for_each(&ctrl->cels, i, cel) {
		xa_erase(&ctrl->cels, i);
		kfree(cel);
	}

	xa_destroy(&ctrl->cels);
}

static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (!subsys || ctrl->instance != subsys->instance)
		ida_free(&nvme_instance_ida, ctrl->instance);
	nvme_free_cels(ctrl);
	nvme_mpath_uninit(ctrl);
	cleanup_srcu_struct(&ctrl->srcu);
	nvme_auth_stop(ctrl);
	nvme_auth_free(ctrl);
	__free_page(ctrl->discard_page);
	free_opal_dev(ctrl->opal_dev);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}
5025
/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 *
 * On success, the caller must use nvme_put_ctrl() to release this when
 * needed, which also invokes the ops->free_ctrl() callback.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
	ctrl->passthru_err_log_enabled = false;
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->namespaces_lock);

	ret = init_srcu_struct(&ctrl->srcu);
	if (ret)
		return ret;

	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
	ctrl->ka_last_check_time = jiffies;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	ret = nvme_auth_init_ctrl(ctrl);
	if (ret)
		goto out_release_instance;

	nvme_mpath_init_ctrl(ctrl);

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
	ctrl->device->class = &nvme_class;
	ctrl->device->parent = ctrl->dev;
	if (ops->dev_attr_groups)
		ctrl->device->groups = ops->dev_attr_groups;
	else
		ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);

	return ret;

out_release_instance:
	ida_free(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	cleanup_srcu_struct(&ctrl->srcu);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/*
 * On success, returns with an elevated controller reference and the caller
 * must use nvme_uninit_ctrl() to properly free resources associated with
 * the ctrl.
 */
int nvme_add_ctrl(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		return ret;

	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ctrl->ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		return ret;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_get_ctrl(ctrl);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_add_ctrl);

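/*
 * Illustrative sketch, not from this file: a transport probe path is
 * expected to chain the two calls above and drop its reference on failure
 * (my_ctrl_ops is a hypothetical stand-in for the transport's
 * nvme_ctrl_ops):
 *
 *	ret = nvme_init_ctrl(ctrl, dev, &my_ctrl_ops, quirks);
 *	if (ret)
 *		return ret;
 *	ret = nvme_add_ctrl(ctrl);
 *	if (ret)
 *		nvme_put_ctrl(ctrl);	(releases the nvme_init_ctrl reference)
 */
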
/* let I/O to all namespaces fail in preparation for surprise removal */
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu))
		blk_mark_disk_dead(ns->disk);
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);

void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu))
		blk_mq_unfreeze_queue_non_owner(ns->queue);
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
	clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu)) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu))
		blk_mq_freeze_queue_wait(ns->queue);
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;
	int srcu_idx;

	set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu))
		/*
		 * The typical non-owner use case is the PCI driver, where
		 * start_freeze is called from the timeout work function but
		 * unfreeze is done in the reset work context.
		 */
		blk_freeze_queue_start_non_owner(ns->queue);
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

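/*
 * Illustrative sketch, not from this file: the freeze helpers above are
 * typically combined during reset to drain in-flight I/O with a bounded
 * wait before queues are reconfigured:
 *
 *	nvme_start_freeze(ctrl);
 *	if (nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT) <= 0)
 *		... not all queues drained in time, handle as needed ...
 *	... reconfigure queues ...
 *	nvme_unfreeze(ctrl);
 */
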
void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl)
{
	if (!ctrl->tagset)
		return;
	if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
		blk_mq_quiesce_tagset(ctrl->tagset);
	else
		blk_mq_wait_quiesce_done(ctrl->tagset);
}
EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues);

void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl)
{
	if (!ctrl->tagset)
		return;
	if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_tagset(ctrl->tagset);
}
EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues);

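/*
 * Note the contrast with freezing above: quiescing only stops the block
 * layer from dispatching new requests to the driver, while freezing also
 * waits for in-flight requests to complete.  Illustrative sketch, not from
 * this file:
 *
 *	nvme_quiesce_io_queues(ctrl);
 *	... cancel or requeue outstanding requests ...
 *	nvme_unquiesce_io_queues(ctrl);
 */
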
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
{
	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_quiesce_queue(ctrl->admin_q);
	else
		blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
}
EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue);

void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
{
	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);

void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu))
		blk_sync_queue(ns->queue);
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, "NVME_TARGET_PASSTHRU");

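/*
 * Illustrative sketch, not from this file: the NVMe target passthru code
 * uses this helper to resolve a user-supplied controller device path,
 * roughly:
 *
 *	file = filp_open(path, O_RDWR, 0);
 *	ctrl = nvme_ctrl_from_file(file);
 *	if (!ctrl)
 *		... not an nvme admin character device, bail out ...
 */
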
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
			NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_endurance_group_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_rotational_media_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
}

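/*
 * Note: BUILD_BUG_ON() fails the build when its condition is true, so the
 * wire-format size assertions above cost nothing at run time.
 */
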
static int __init nvme_core_init(void)
{
	unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS;
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq", wq_flags, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq", wq_flags, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq", wq_flags, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	result = class_register(&nvme_class);
	if (result)
		goto unregister_chrdev;

	result = class_register(&nvme_subsys_class);
	if (result)
		goto destroy_class;

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	result = class_register(&nvme_ns_chr_class);
	if (result)
		goto unregister_generic_ns;

	result = nvme_init_auth();
	if (result)
		goto destroy_ns_chr;
	return 0;

destroy_ns_chr:
	class_unregister(&nvme_ns_chr_class);
unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_unregister(&nvme_subsys_class);
destroy_class:
	class_unregister(&nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	nvme_exit_auth();
	class_unregister(&nvme_ns_chr_class);
	class_unregister(&nvme_subsys_class);
	class_unregister(&nvme_class);
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_ns_chr_minor_ida);
	ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("NVMe host core framework");
module_init(nvme_core_init);
module_exit(nvme_core_exit);