// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 * - subsystems list
 * - per-subsystem allowed hosts list
 * - allow_any_host subsystem attribute
 * - nvmet_genctr
 * - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * a host-subsystem link) the read lock is taken to allow concurrent readers.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	u16 status;

	switch (errno) {
	case 0:
		status = NVME_SC_SUCCESS;
		break;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		break;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		break;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		status = NVME_SC_ACCESS_DENIED;
		break;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
	}

	return status;
}

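/*
 * Illustrative sketch (not part of this file): a backend completion path
 * would typically funnel a negative errno from the block layer or VFS
 * through errno_to_nvme_status() when finishing a request. The helper
 * name below is hypothetical:
 *
 *	static void nvmet_foo_io_done(struct nvmet_req *req, int err)
 *	{
 *		nvmet_req_complete(req, errno_to_nvme_status(req, err));
 *	}
 */
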
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

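/*
 * Illustrative sketch: an admin command handler that returns a fixed-size
 * structure bounces it through the request's SGL with the helpers above,
 * assuming "log" is a locally filled-in structure:
 *
 *	u16 status = nvmet_copy_to_sgl(req, 0, &log, sizeof(log));
 *
 *	nvmet_req_complete(req, status);
 */
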
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	unsigned long nsid = 0;
	struct nvmet_ns *cur;
	unsigned long idx;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}

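/*
 * Build Completion Queue Entry Dword 0 for an Asynchronous Event Request
 * completion: bits 2:0 carry the event type, bits 15:8 the event
 * information and bits 23:16 the log page identifier.
 */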
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, status);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

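/*
 * Illustrative sketch: a fabrics transport registers a filled-in
 * nvmet_fabrics_ops at module load and unregisters it on unload. All
 * "foo" names below are hypothetical:
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	static int __init nvmet_foo_init(void)
 *	{
 *		return nvmet_register_transport(&nvmet_foo_ops);
 *	}
 */
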
void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't pi capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
		       port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

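/*
 * Keep-alive is traffic based: if any command was seen during the last
 * keep-alive window the timer below simply re-arms itself, so a host that
 * is busy with I/O but late with Keep Alive commands is not torn down.
 * Only a fully idle host that misses the window triggers a fatal error.
 */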
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool cmd_seen = ctrl->cmd_seen;

	ctrl->cmd_seen = false;
	if (cmd_seen) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

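/*
 * Look up a namespace by NSID and take a percpu reference on it. Callers
 * must pair a successful lookup with nvmet_put_namespace() once they are
 * done with the namespace (__nvmet_req_complete() does this for req->ns).
 */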
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
	if (ns)
		percpu_ref_get(&ns->ref);

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
		struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

void nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	if (oldsize != ns->size)
		nvmet_ns_changed(ns->subsys, ns->nsid);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_passthru_ctrl(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock(). Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;

	return ns;
}

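/*
 * Advance the submission queue head that is reported back in each
 * completion entry. The cmpxchg() loop lets concurrent completions bump
 * sqhd without taking a lock.
 */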
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
				old_sqhd);
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (ctrl) {
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_io_cmd(req);

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}

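/*
 * Illustrative outline of how a transport typically drives a request
 * (details and error handling vary per transport):
 *
 *	if (!nvmet_req_init(req, cq, sq, ops))
 *		return;		// already completed with an error status
 *	...			// set req->transfer_len from the wire
 *	if (req->transfer_len && nvmet_req_alloc_sgls(req) < 0)
 *		...		// fail or retry the request
 *	...			// map req->sg and move the data
 *	req->execute(req);	// handler ends in nvmet_req_complete()
 */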
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	trace_nvmet_req_init(req, req->cmd);

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->cmd_seen = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

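/*
 * Illustrative sketch: command handlers call the helper below first to
 * verify that the host transferred exactly the amount of data the command
 * needs, e.g.:
 *
 *	if (!nvmet_check_transfer_len(req, sizeof(struct nvme_id_ctrl)))
 *		return;
 *
 * On a mismatch the request has already been completed with an SGL error,
 * so the handler just returns.
 */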
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}
	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}

static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
		return false;

	if (req->sq->ctrl && req->sq->qid && req->ns) {
		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
						 req->ns->nsid);
		if (req->p2p_dev)
			return true;
	}

	req->p2p_dev = NULL;
	return false;
}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);

a07b4970 CH |
1073 | static inline bool nvmet_cc_en(u32 cc) |
1074 | { | |
ad4e05b2 | 1075 | return (cc >> NVME_CC_EN_SHIFT) & 0x1; |
a07b4970 CH |
1076 | } |
1077 | ||
1078 | static inline u8 nvmet_cc_css(u32 cc) | |
1079 | { | |
ad4e05b2 | 1080 | return (cc >> NVME_CC_CSS_SHIFT) & 0x7; |
a07b4970 CH |
1081 | } |
1082 | ||
1083 | static inline u8 nvmet_cc_mps(u32 cc) | |
1084 | { | |
ad4e05b2 | 1085 | return (cc >> NVME_CC_MPS_SHIFT) & 0xf; |
a07b4970 CH |
1086 | } |
1087 | ||
1088 | static inline u8 nvmet_cc_ams(u32 cc) | |
1089 | { | |
ad4e05b2 | 1090 | return (cc >> NVME_CC_AMS_SHIFT) & 0x7; |
a07b4970 CH |
1091 | } |
1092 | ||
1093 | static inline u8 nvmet_cc_shn(u32 cc) | |
1094 | { | |
ad4e05b2 | 1095 | return (cc >> NVME_CC_SHN_SHIFT) & 0x3; |
a07b4970 CH |
1096 | } |
1097 | ||
1098 | static inline u8 nvmet_cc_iosqes(u32 cc) | |
1099 | { | |
ad4e05b2 | 1100 | return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf; |
a07b4970 CH |
1101 | } |
1102 | ||
1103 | static inline u8 nvmet_cc_iocqes(u32 cc) | |
1104 | { | |
ad4e05b2 | 1105 | return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; |
a07b4970 CH |
1106 | } |
1107 | ||
1108 | static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) | |
1109 | { | |
1110 | lockdep_assert_held(&ctrl->lock); | |
1111 | ||
1112 | if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || | |
1113 | nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES || | |
1114 | nvmet_cc_mps(ctrl->cc) != 0 || | |
1115 | nvmet_cc_ams(ctrl->cc) != 0 || | |
1116 | nvmet_cc_css(ctrl->cc) != 0) { | |
1117 | ctrl->csts = NVME_CSTS_CFS; | |
1118 | return; | |
1119 | } | |
1120 | ||
1121 | ctrl->csts = NVME_CSTS_RDY; | |
d68a90e1 MG |
1122 | |
1123 | /* | |
1124 | * Controllers that are not yet enabled should not really enforce the | |
1125 | * keep alive timeout, but we still want to track a timeout and cleanup | |
1126 | * in case a host died before it enabled the controller. Hence, simply | |
1127 | * reset the keep alive timer when the controller is enabled. | |
1128 | */ | |
1129 | mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); | |
a07b4970 CH |
1130 | } |
1131 | ||
1132 | static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) | |
1133 | { | |
1134 | lockdep_assert_held(&ctrl->lock); | |
1135 | ||
1136 | /* XXX: tear down queues? */ | |
1137 | ctrl->csts &= ~NVME_CSTS_RDY; | |
1138 | ctrl->cc = 0; | |
1139 | } | |
1140 | ||
1141 | void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) | |
1142 | { | |
1143 | u32 old; | |
1144 | ||
1145 | mutex_lock(&ctrl->lock); | |
1146 | old = ctrl->cc; | |
1147 | ctrl->cc = new; | |
1148 | ||
1149 | if (nvmet_cc_en(new) && !nvmet_cc_en(old)) | |
1150 | nvmet_start_ctrl(ctrl); | |
1151 | if (!nvmet_cc_en(new) && nvmet_cc_en(old)) | |
1152 | nvmet_clear_ctrl(ctrl); | |
1153 | if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) { | |
1154 | nvmet_clear_ctrl(ctrl); | |
1155 | ctrl->csts |= NVME_CSTS_SHST_CMPLT; | |
1156 | } | |
1157 | if (!nvmet_cc_shn(new) && nvmet_cc_shn(old)) | |
1158 | ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; | |
1159 | mutex_unlock(&ctrl->lock); | |
1160 | } | |
1161 | ||
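/*
 * CAP advertised to the host: bit 37 selects the NVM command set in
 * CAP.CSS, bits 31:24 are CAP.TO (15 * 500ms = 7.5s here) and the low
 * 16 bits are CAP.MQES, which is zero-based, hence the "- 1".
 */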
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	unsigned long idx;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;
	if (subsys->cntlid_min > subsys->cntlid_max)
		goto out_free_sqs;

	ret = ida_simple_get(&cntlid_ida,
			     subsys->cntlid_min, subsys->cntlid_max,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to clean up stale discovery sessions
	 */
	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	nvmet_async_events_free(ctrl);
	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVMET_DEFAULT_VS;
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return ERR_PTR(-EINVAL);
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return ERR_PTR(-ENOMEM);
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	xa_init(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

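/*
 * Subsystems are normally created from user space through configfs (a
 * mkdir under /sys/kernel/config/nvmet/subsystems/<subnqn> ends up in
 * nvmet_subsys_alloc() via configfs.c); the reference taken by
 * kref_init() above is dropped through nvmet_subsys_put() when the
 * directory is removed.
 */
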
static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

	xa_destroy(&subsys->namespaces);
	nvmet_passthru_subsys_free(subsys);

	kfree(subsys->subsysnqn);
	kfree_rcu(subsys->model, rcuhead);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq) {
		error = -ENOMEM;
		goto out;
	}

	error = nvmet_init_discovery();
	if (error)
		goto out_free_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");