Commit | Line | Data |
---|---|---|
e399441d JS |
1 | /* |
2 | * Copyright (c) 2016 Avago Technologies. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of version 2 of the GNU General Public License as | |
6 | * published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful. | |
9 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, | |
10 | * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A | |
11 | * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO | |
12 | * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. | |
13 | * See the GNU General Public License for more details, a copy of which | |
14 | * can be found in the file COPYING included with this package | |
15 | * | |
16 | */ | |
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
18 | #include <linux/module.h> | |
19 | #include <linux/parser.h> | |
20 | #include <uapi/scsi/fc/fc_fs.h> | |
21 | #include <uapi/scsi/fc/fc_els.h> | |
61bff8ef | 22 | #include <linux/delay.h> |
e399441d JS |
23 | |
24 | #include "nvme.h" | |
25 | #include "fabrics.h" | |
26 | #include <linux/nvme-fc-driver.h> | |
27 | #include <linux/nvme-fc.h> | |
28 | ||
29 | ||
30 | /* *************************** Data Structures/Defines ****************** */ | |
31 | ||
32 | ||
e399441d | 33 | enum nvme_fc_queue_flags { |
26c0a26d JA |
34 | NVME_FC_Q_CONNECTED = 0, |
35 | NVME_FC_Q_LIVE, | |
e399441d JS |
36 | }; |
37 | ||
ac7fe82b JS |
38 | #define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */ |
39 | ||
e399441d JS |
40 | struct nvme_fc_queue { |
41 | struct nvme_fc_ctrl *ctrl; | |
42 | struct device *dev; | |
43 | struct blk_mq_hw_ctx *hctx; | |
44 | void *lldd_handle; | |
e399441d JS |
45 | size_t cmnd_capsule_len; |
46 | u32 qnum; | |
47 | u32 rqcnt; | |
48 | u32 seqno; | |
49 | ||
50 | u64 connection_id; | |
51 | atomic_t csn; | |
52 | ||
53 | unsigned long flags; | |
54 | } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ | |
55 | ||
8d64daf7 JS |
56 | enum nvme_fcop_flags { |
57 | FCOP_FLAGS_TERMIO = (1 << 0), | |
c3aedd22 | 58 | FCOP_FLAGS_AEN = (1 << 1), |
8d64daf7 JS |
59 | }; |
60 | ||
e399441d JS |
61 | struct nvmefc_ls_req_op { |
62 | struct nvmefc_ls_req ls_req; | |
63 | ||
c913a8b0 | 64 | struct nvme_fc_rport *rport; |
e399441d JS |
65 | struct nvme_fc_queue *queue; |
66 | struct request *rq; | |
8d64daf7 | 67 | u32 flags; |
e399441d JS |
68 | |
69 | int ls_error; | |
70 | struct completion ls_done; | |
c913a8b0 | 71 | struct list_head lsreq_list; /* rport->ls_req_list */ |
e399441d JS |
72 | bool req_queued; |
73 | }; | |
74 | ||
75 | enum nvme_fcpop_state { | |
76 | FCPOP_STATE_UNINIT = 0, | |
77 | FCPOP_STATE_IDLE = 1, | |
78 | FCPOP_STATE_ACTIVE = 2, | |
79 | FCPOP_STATE_ABORTED = 3, | |
78a7ac26 | 80 | FCPOP_STATE_COMPLETE = 4, |
e399441d JS |
81 | }; |
82 | ||
83 | struct nvme_fc_fcp_op { | |
84 | struct nvme_request nreq; /* | |
85 | * nvme/host/core.c | |
86 | * requires this to be | |
87 | * the 1st element in the | |
88 | * private structure | |
89 | * associated with the | |
90 | * request. | |
91 | */ | |
92 | struct nvmefc_fcp_req fcp_req; | |
93 | ||
94 | struct nvme_fc_ctrl *ctrl; | |
95 | struct nvme_fc_queue *queue; | |
96 | struct request *rq; | |
97 | ||
98 | atomic_t state; | |
78a7ac26 | 99 | u32 flags; |
e399441d JS |
100 | u32 rqno; |
101 | u32 nents; | |
102 | ||
103 | struct nvme_fc_cmd_iu cmd_iu; | |
104 | struct nvme_fc_ersp_iu rsp_iu; | |
105 | }; | |
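The comment on nreq above is load-bearing: nvme core obtains the per-request private data via blk_mq_rq_to_pdu() and treats that address as a struct nvme_request, while this transport treats the same address as its nvme_fc_fcp_op. A minimal sketch of that aliasing assumption (the example_* helper names are illustrative, not part of the driver):

	/* valid only while nreq remains the first member of nvme_fc_fcp_op */
	static inline struct nvme_fc_fcp_op *example_rq_to_op(struct request *rq)
	{
		return blk_mq_rq_to_pdu(rq);	/* transport's view of the pdu */
	}

	static inline struct nvme_request *example_rq_to_nvme_req(struct request *rq)
	{
		return blk_mq_rq_to_pdu(rq);	/* core's view of the same pdu */
	}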
106 | ||
107 | struct nvme_fc_lport { | |
108 | struct nvme_fc_local_port localport; | |
109 | ||
110 | struct ida endp_cnt; | |
111 | struct list_head port_list; /* nvme_fc_port_list */ | |
112 | struct list_head endp_list; | |
113 | struct device *dev; /* physical device for dma */ | |
114 | struct nvme_fc_port_template *ops; | |
115 | struct kref ref; | |
158bfb88 | 116 | atomic_t act_rport_cnt; |
e399441d JS |
117 | } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ |
118 | ||
119 | struct nvme_fc_rport { | |
120 | struct nvme_fc_remote_port remoteport; | |
121 | ||
122 | struct list_head endp_list; /* for lport->endp_list */ | |
123 | struct list_head ctrl_list; | |
c913a8b0 JS |
124 | struct list_head ls_req_list; |
125 | struct device *dev; /* physical device for dma */ | |
126 | struct nvme_fc_lport *lport; | |
e399441d JS |
127 | spinlock_t lock; |
128 | struct kref ref; | |
158bfb88 | 129 | atomic_t act_ctrl_cnt; |
2b632970 | 130 | unsigned long dev_loss_end; |
e399441d JS |
131 | } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ |
132 | ||
61bff8ef JS |
133 | enum nvme_fcctrl_flags { |
134 | FCCTRL_TERMIO = (1 << 0), | |
e399441d JS |
135 | }; |
136 | ||
137 | struct nvme_fc_ctrl { | |
138 | spinlock_t lock; | |
139 | struct nvme_fc_queue *queues; | |
e399441d JS |
140 | struct device *dev; |
141 | struct nvme_fc_lport *lport; | |
142 | struct nvme_fc_rport *rport; | |
143 | u32 cnum; | |
144 | ||
158bfb88 | 145 | bool assoc_active; |
e399441d JS |
146 | u64 association_id; |
147 | ||
e399441d | 148 | struct list_head ctrl_list; /* rport->ctrl_list */ |
e399441d JS |
149 | |
150 | struct blk_mq_tag_set admin_tag_set; | |
151 | struct blk_mq_tag_set tag_set; | |
152 | ||
61bff8ef | 153 | struct delayed_work connect_work; |
61bff8ef | 154 | |
e399441d | 155 | struct kref ref; |
61bff8ef JS |
156 | u32 flags; |
157 | u32 iocnt; | |
36715cf4 | 158 | wait_queue_head_t ioabort_wait; |
e399441d | 159 | |
38dabe21 | 160 | struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS]; |
e399441d JS |
161 | |
162 | struct nvme_ctrl ctrl; | |
163 | }; | |
164 | ||
165 | static inline struct nvme_fc_ctrl * | |
166 | to_fc_ctrl(struct nvme_ctrl *ctrl) | |
167 | { | |
168 | return container_of(ctrl, struct nvme_fc_ctrl, ctrl); | |
169 | } | |
170 | ||
171 | static inline struct nvme_fc_lport * | |
172 | localport_to_lport(struct nvme_fc_local_port *portptr) | |
173 | { | |
174 | return container_of(portptr, struct nvme_fc_lport, localport); | |
175 | } | |
176 | ||
177 | static inline struct nvme_fc_rport * | |
178 | remoteport_to_rport(struct nvme_fc_remote_port *portptr) | |
179 | { | |
180 | return container_of(portptr, struct nvme_fc_rport, remoteport); | |
181 | } | |
182 | ||
183 | static inline struct nvmefc_ls_req_op * | |
184 | ls_req_to_lsop(struct nvmefc_ls_req *lsreq) | |
185 | { | |
186 | return container_of(lsreq, struct nvmefc_ls_req_op, ls_req); | |
187 | } | |
188 | ||
189 | static inline struct nvme_fc_fcp_op * | |
190 | fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq) | |
191 | { | |
192 | return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req); | |
193 | } | |
194 | ||
195 | ||
196 | ||
197 | /* *************************** Globals **************************** */ | |
198 | ||
199 | ||
200 | static DEFINE_SPINLOCK(nvme_fc_lock); | |
201 | ||
202 | static LIST_HEAD(nvme_fc_lport_list); | |
203 | static DEFINE_IDA(nvme_fc_local_port_cnt); | |
204 | static DEFINE_IDA(nvme_fc_ctrl_cnt); | |
205 | ||
e399441d JS |
206 | |
207 | ||
5f568556 JS |
208 | /* |
209 | * These items are short-term. They will eventually be moved into | |
210 | * a generic FC class. See comments in module init. | |
211 | */ | |
212 | static struct class *fc_class; | |
213 | static struct device *fc_udev_device; | |
214 | ||
e399441d JS |
215 | |
216 | /* *********************** FC-NVME Port Management ************************ */ | |
217 | ||
e399441d JS |
218 | static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *, |
219 | struct nvme_fc_queue *, unsigned int); | |
220 | ||
5533d424 JS |
221 | static void |
222 | nvme_fc_free_lport(struct kref *ref) | |
223 | { | |
224 | struct nvme_fc_lport *lport = | |
225 | container_of(ref, struct nvme_fc_lport, ref); | |
226 | unsigned long flags; | |
227 | ||
228 | WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); | |
229 | WARN_ON(!list_empty(&lport->endp_list)); | |
230 | ||
231 | /* remove from transport list */ | |
232 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
233 | list_del(&lport->port_list); | |
234 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
235 | ||
5533d424 JS |
236 | ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); |
237 | ida_destroy(&lport->endp_cnt); | |
238 | ||
239 | put_device(lport->dev); | |
240 | ||
241 | kfree(lport); | |
242 | } | |
243 | ||
244 | static void | |
245 | nvme_fc_lport_put(struct nvme_fc_lport *lport) | |
246 | { | |
247 | kref_put(&lport->ref, nvme_fc_free_lport); | |
248 | } | |
249 | ||
250 | static int | |
251 | nvme_fc_lport_get(struct nvme_fc_lport *lport) | |
252 | { | |
253 | return kref_get_unless_zero(&lport->ref); | |
254 | } | |
255 | ||
256 | ||
257 | static struct nvme_fc_lport * | |
c5760f30 JS |
258 | nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo, |
259 | struct nvme_fc_port_template *ops, | |
260 | struct device *dev) | |
5533d424 JS |
261 | { |
262 | struct nvme_fc_lport *lport; | |
263 | unsigned long flags; | |
264 | ||
265 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
266 | ||
267 | list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { | |
268 | if (lport->localport.node_name != pinfo->node_name || | |
269 | lport->localport.port_name != pinfo->port_name) | |
270 | continue; | |
271 | ||
c5760f30 JS |
272 | if (lport->dev != dev) { |
273 | lport = ERR_PTR(-EXDEV); | |
274 | goto out_done; | |
275 | } | |
276 | ||
5533d424 JS |
277 | if (lport->localport.port_state != FC_OBJSTATE_DELETED) { |
278 | lport = ERR_PTR(-EEXIST); | |
279 | goto out_done; | |
280 | } | |
281 | ||
282 | if (!nvme_fc_lport_get(lport)) { | |
283 | /* | |
284 | * fails if ref cnt already 0. If so, | |
285 | * act as if lport already deleted | |
286 | */ | |
287 | lport = NULL; | |
288 | goto out_done; | |
289 | } | |
290 | ||
291 | /* resume the lport */ | |
292 | ||
c5760f30 | 293 | lport->ops = ops; |
5533d424 JS |
294 | lport->localport.port_role = pinfo->port_role; |
295 | lport->localport.port_id = pinfo->port_id; | |
296 | lport->localport.port_state = FC_OBJSTATE_ONLINE; | |
297 | ||
298 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
299 | ||
300 | return lport; | |
301 | } | |
302 | ||
303 | lport = NULL; | |
304 | ||
305 | out_done: | |
306 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
307 | ||
308 | return lport; | |
309 | } | |
e399441d JS |
310 | |
311 | /** | |
312 | * nvme_fc_register_localport - transport entry point called by an | |
313 | * LLDD to register the existence of a NVME | |
314 | * host FC port. | |
315 | * @pinfo: pointer to information about the port to be registered | |
316 | * @template: LLDD entrypoints and operational parameters for the port | |
317 | * @dev: physical hardware device node port corresponds to. Will be | |
318 | * used for DMA mappings | |
319 | * @lport_p: pointer to a local port pointer. Upon success, the routine | |
320 | * will allocate a nvme_fc_local_port structure and place its | |
321 | * address in the local port pointer. Upon failure, local port | |
322 | * pointer will be set to 0. | |
323 | * | |
324 | * Returns: | |
325 | * a completion status. Must be 0 upon success; a negative errno | |
326 | * (ex: -ENXIO) upon failure. | |
327 | */ | |
328 | int | |
329 | nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, | |
330 | struct nvme_fc_port_template *template, | |
331 | struct device *dev, | |
332 | struct nvme_fc_local_port **portptr) | |
333 | { | |
334 | struct nvme_fc_lport *newrec; | |
335 | unsigned long flags; | |
336 | int ret, idx; | |
337 | ||
338 | if (!template->localport_delete || !template->remoteport_delete || | |
339 | !template->ls_req || !template->fcp_io || | |
340 | !template->ls_abort || !template->fcp_abort || | |
341 | !template->max_hw_queues || !template->max_sgl_segments || | |
342 | !template->max_dif_sgl_segments || !template->dma_boundary) { | |
343 | ret = -EINVAL; | |
344 | goto out_reghost_failed; | |
345 | } | |
346 | ||
5533d424 JS |
347 | /* |
348 | * look to see if there is already a localport that had been | |
349 | * deregistered and is in the process of waiting for all the | |
350 | * references to fully be removed. If the references haven't | |
351 | * expired, we can simply re-enable the localport. Remoteports | |
352 | * and controller reconnections should resume naturally. | |
353 | */ | |
c5760f30 | 354 | newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev); |
5533d424 JS |
355 | |
356 | /* found an lport, but something about its state is bad */ | |
357 | if (IS_ERR(newrec)) { | |
358 | ret = PTR_ERR(newrec); | |
359 | goto out_reghost_failed; | |
360 | ||
361 | /* found existing lport, which was resumed */ | |
362 | } else if (newrec) { | |
363 | *portptr = &newrec->localport; | |
364 | return 0; | |
365 | } | |
366 | ||
367 | /* nothing found - allocate a new localport struct */ | |
368 | ||
e399441d JS |
369 | newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), |
370 | GFP_KERNEL); | |
371 | if (!newrec) { | |
372 | ret = -ENOMEM; | |
373 | goto out_reghost_failed; | |
374 | } | |
375 | ||
376 | idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL); | |
377 | if (idx < 0) { | |
378 | ret = -ENOSPC; | |
379 | goto out_fail_kfree; | |
380 | } | |
381 | ||
382 | if (!get_device(dev) && dev) { | |
383 | ret = -ENODEV; | |
384 | goto out_ida_put; | |
385 | } | |
386 | ||
387 | INIT_LIST_HEAD(&newrec->port_list); | |
388 | INIT_LIST_HEAD(&newrec->endp_list); | |
389 | kref_init(&newrec->ref); | |
158bfb88 | 390 | atomic_set(&newrec->act_rport_cnt, 0); |
e399441d JS |
391 | newrec->ops = template; |
392 | newrec->dev = dev; | |
393 | ida_init(&newrec->endp_cnt); | |
394 | newrec->localport.private = &newrec[1]; | |
395 | newrec->localport.node_name = pinfo->node_name; | |
396 | newrec->localport.port_name = pinfo->port_name; | |
397 | newrec->localport.port_role = pinfo->port_role; | |
398 | newrec->localport.port_id = pinfo->port_id; | |
399 | newrec->localport.port_state = FC_OBJSTATE_ONLINE; | |
400 | newrec->localport.port_num = idx; | |
401 | ||
402 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
403 | list_add_tail(&newrec->port_list, &nvme_fc_lport_list); | |
404 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
405 | ||
406 | if (dev) | |
407 | dma_set_seg_boundary(dev, template->dma_boundary); | |
408 | ||
409 | *portptr = &newrec->localport; | |
410 | return 0; | |
411 | ||
412 | out_ida_put: | |
413 | ida_simple_remove(&nvme_fc_local_port_cnt, idx); | |
414 | out_fail_kfree: | |
415 | kfree(newrec); | |
416 | out_reghost_failed: | |
417 | *portptr = NULL; | |
418 | ||
419 | return ret; | |
420 | } | |
421 | EXPORT_SYMBOL_GPL(nvme_fc_register_localport); | |
422 | ||
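As a usage illustration, an LLDD registration could look roughly like the sketch below. Only the template fields validated above are shown; the example_* names, the WWN/port values, and the FC_PORT_ROLE_NVME_INITIATOR role flag are assumptions for the sketch, and error handling is abbreviated:

	static struct nvme_fc_port_template example_fc_template = {
		.localport_delete	= example_localport_delete,
		.remoteport_delete	= example_remoteport_delete,
		.ls_req			= example_ls_req,
		.fcp_io			= example_fcp_io,
		.ls_abort		= example_ls_abort,
		.fcp_abort		= example_fcp_abort,
		.max_hw_queues		= 4,
		.max_sgl_segments	= 64,
		.max_dif_sgl_segments	= 64,
		.dma_boundary		= 0xFFFFFFFF,
		.local_priv_sz		= sizeof(struct example_lport_priv),
		.remote_priv_sz		= sizeof(struct example_rport_priv),
		.lsrqst_priv_sz		= sizeof(struct example_ls_priv),
	};

	static int example_register_localport(struct device *dev,
					      struct nvme_fc_local_port **lport)
	{
		struct nvme_fc_port_info pinfo = {
			.node_name = 0x20000090fa942779ULL,	/* WWNN (example) */
			.port_name = 0x10000090fa942779ULL,	/* WWPN (example) */
			.port_role = FC_PORT_ROLE_NVME_INITIATOR,
			.port_id   = 0x010200,
		};

		return nvme_fc_register_localport(&pinfo, &example_fc_template,
						  dev, lport);
	}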
e399441d JS |
423 | /** |
424 | * nvme_fc_unregister_localport - transport entry point called by an | |
425 | * LLDD to deregister/remove a previously | |
426 | * registered NVME host FC port. | |
427 | * @localport: pointer to the (registered) local port that is to be | |
428 | * deregistered. | |
429 | * | |
430 | * Returns: | |
431 | * a completion status. Must be 0 upon success; a negative errno | |
432 | * (ex: -ENXIO) upon failure. | |
433 | */ | |
434 | int | |
435 | nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr) | |
436 | { | |
437 | struct nvme_fc_lport *lport = localport_to_lport(portptr); | |
438 | unsigned long flags; | |
439 | ||
440 | if (!portptr) | |
441 | return -EINVAL; | |
442 | ||
443 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
444 | ||
445 | if (portptr->port_state != FC_OBJSTATE_ONLINE) { | |
446 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
447 | return -EINVAL; | |
448 | } | |
449 | portptr->port_state = FC_OBJSTATE_DELETED; | |
450 | ||
451 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
452 | ||
158bfb88 JS |
453 | if (atomic_read(&lport->act_rport_cnt) == 0) |
454 | lport->ops->localport_delete(&lport->localport); | |
455 | ||
e399441d JS |
456 | nvme_fc_lport_put(lport); |
457 | ||
458 | return 0; | |
459 | } | |
460 | EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport); | |
461 | ||
eaefd5ab JS |
462 | /* |
463 | * TRADDR strings, per FC-NVME are fixed format: | |
464 | * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters | |
465 | * udev event will only differ by prefix of what field is | |
466 | * being specified: | |
467 | * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters | |
468 | * 19 + 43 + null_fudge = 64 characters | |
469 | */ | |
470 | #define FCNVME_TRADDR_LENGTH 64 | |
471 | ||
472 | static void | |
473 | nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport, | |
474 | struct nvme_fc_rport *rport) | |
475 | { | |
476 | char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/ | |
477 | char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/ | |
478 | char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL }; | |
479 | ||
480 | if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) | |
481 | return; | |
482 | ||
483 | snprintf(hostaddr, sizeof(hostaddr), | |
484 | "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", | |
485 | lport->localport.node_name, lport->localport.port_name); | |
486 | snprintf(tgtaddr, sizeof(tgtaddr), | |
487 | "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", | |
488 | rport->remoteport.node_name, rport->remoteport.port_name); | |
489 | kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); | |
490 | } | |
491 | ||
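For reference, a discovery uevent built by this routine carries three variables in the fixed TRADDR format described above (the WWN values below are illustrative only); a udev rule can match on FC_EVENT=nvmediscovery and pass the two traddr values on to an nvme connect:

	FC_EVENT=nvmediscovery
	NVMEFC_HOST_TRADDR=nn-0x20000090fa942779:pn-0x10000090fa942779
	NVMEFC_TRADDR=nn-0x208900a098dfdd77:pn-0x208a00a098dfdd77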
469d0ef0 JS |
492 | static void |
493 | nvme_fc_free_rport(struct kref *ref) | |
494 | { | |
495 | struct nvme_fc_rport *rport = | |
496 | container_of(ref, struct nvme_fc_rport, ref); | |
497 | struct nvme_fc_lport *lport = | |
498 | localport_to_lport(rport->remoteport.localport); | |
499 | unsigned long flags; | |
500 | ||
501 | WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); | |
502 | WARN_ON(!list_empty(&rport->ctrl_list)); | |
503 | ||
504 | /* remove from lport list */ | |
505 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
506 | list_del(&rport->endp_list); | |
507 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
508 | ||
469d0ef0 JS |
509 | ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num); |
510 | ||
511 | kfree(rport); | |
512 | ||
513 | nvme_fc_lport_put(lport); | |
514 | } | |
515 | ||
516 | static void | |
517 | nvme_fc_rport_put(struct nvme_fc_rport *rport) | |
518 | { | |
519 | kref_put(&rport->ref, nvme_fc_free_rport); | |
520 | } | |
521 | ||
522 | static int | |
523 | nvme_fc_rport_get(struct nvme_fc_rport *rport) | |
524 | { | |
525 | return kref_get_unless_zero(&rport->ref); | |
526 | } | |
527 | ||
2b632970 JS |
528 | static void |
529 | nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) | |
530 | { | |
531 | switch (ctrl->ctrl.state) { | |
532 | case NVME_CTRL_NEW: | |
ad6a0a52 | 533 | case NVME_CTRL_CONNECTING: |
2b632970 JS |
534 | /* |
535 | * As all reconnects were suppressed, schedule a | |
536 | * connect. | |
537 | */ | |
538 | dev_info(ctrl->ctrl.device, | |
539 | "NVME-FC{%d}: connectivity re-established. " | |
540 | "Attempting reconnect\n", ctrl->cnum); | |
541 | ||
542 | queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); | |
543 | break; | |
544 | ||
545 | case NVME_CTRL_RESETTING: | |
546 | /* | |
547 | * Controller is already in the process of terminating the | |
548 | * association. No need to do anything further. The reconnect | |
549 | * step will naturally occur after the reset completes. | |
550 | */ | |
551 | break; | |
552 | ||
553 | default: | |
554 | /* no action to take - let it delete */ | |
555 | break; | |
556 | } | |
557 | } | |
558 | ||
559 | static struct nvme_fc_rport * | |
560 | nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport, | |
561 | struct nvme_fc_port_info *pinfo) | |
562 | { | |
563 | struct nvme_fc_rport *rport; | |
564 | struct nvme_fc_ctrl *ctrl; | |
565 | unsigned long flags; | |
566 | ||
567 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
568 | ||
569 | list_for_each_entry(rport, &lport->endp_list, endp_list) { | |
570 | if (rport->remoteport.node_name != pinfo->node_name || | |
571 | rport->remoteport.port_name != pinfo->port_name) | |
572 | continue; | |
573 | ||
574 | if (!nvme_fc_rport_get(rport)) { | |
575 | rport = ERR_PTR(-ENOLCK); | |
576 | goto out_done; | |
577 | } | |
578 | ||
579 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
580 | ||
581 | spin_lock_irqsave(&rport->lock, flags); | |
582 | ||
583 | /* has it been unregistered */ | |
584 | if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { | |
585 | /* means lldd called us twice */ | |
586 | spin_unlock_irqrestore(&rport->lock, flags); | |
587 | nvme_fc_rport_put(rport); | |
588 | return ERR_PTR(-ESTALE); | |
589 | } | |
590 | ||
591 | rport->remoteport.port_state = FC_OBJSTATE_ONLINE; | |
592 | rport->dev_loss_end = 0; | |
593 | ||
594 | /* | |
595 | * kick off a reconnect attempt on all associations to the | |
596 | * remote port. A successful reconnect will resume i/o. | |
597 | */ | |
598 | list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) | |
599 | nvme_fc_resume_controller(ctrl); | |
600 | ||
601 | spin_unlock_irqrestore(&rport->lock, flags); | |
602 | ||
603 | return rport; | |
604 | } | |
605 | ||
606 | rport = NULL; | |
607 | ||
608 | out_done: | |
609 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
610 | ||
611 | return rport; | |
612 | } | |
613 | ||
614 | static inline void | |
615 | __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport, | |
616 | struct nvme_fc_port_info *pinfo) | |
617 | { | |
618 | if (pinfo->dev_loss_tmo) | |
619 | rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; | |
620 | else | |
621 | rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; | |
622 | } | |
623 | ||
e399441d JS |
624 | /** |
625 | * nvme_fc_register_remoteport - transport entry point called by an | |
626 | * LLDD to register the existence of a NVME | |
627 | * subsystem FC port on its fabric. | |
628 | * @localport: pointer to the (registered) local port that the remote | |
629 | * subsystem port is connected to. | |
630 | * @pinfo: pointer to information about the port to be registered | |
631 | * @rport_p: pointer to a remote port pointer. Upon success, the routine | |
632 | * will allocate a nvme_fc_remote_port structure and place its | |
633 | * address in the remote port pointer. Upon failure, remote port | |
634 | * pointer will be set to 0. | |
635 | * | |
636 | * Returns: | |
637 | * a completion status. Must be 0 upon success; a negative errno | |
638 | * (ex: -ENXIO) upon failure. | |
639 | */ | |
640 | int | |
641 | nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, | |
642 | struct nvme_fc_port_info *pinfo, | |
643 | struct nvme_fc_remote_port **portptr) | |
644 | { | |
645 | struct nvme_fc_lport *lport = localport_to_lport(localport); | |
646 | struct nvme_fc_rport *newrec; | |
647 | unsigned long flags; | |
648 | int ret, idx; | |
649 | ||
2b632970 JS |
650 | if (!nvme_fc_lport_get(lport)) { |
651 | ret = -ESHUTDOWN; | |
652 | goto out_reghost_failed; | |
653 | } | |
654 | ||
655 | /* | |
656 | * look to see if there is already a remoteport that is waiting | |
657 | * for a reconnect (within dev_loss_tmo) with the same WWN's. | |
658 | * If so, transition to it and reconnect. | |
659 | */ | |
660 | newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo); | |
661 | ||
662 | /* found an rport, but something about its state is bad */ | |
663 | if (IS_ERR(newrec)) { | |
664 | ret = PTR_ERR(newrec); | |
665 | goto out_lport_put; | |
666 | ||
667 | /* found existing rport, which was resumed */ | |
668 | } else if (newrec) { | |
669 | nvme_fc_lport_put(lport); | |
670 | __nvme_fc_set_dev_loss_tmo(newrec, pinfo); | |
671 | nvme_fc_signal_discovery_scan(lport, newrec); | |
672 | *portptr = &newrec->remoteport; | |
673 | return 0; | |
674 | } | |
675 | ||
676 | /* nothing found - allocate a new remoteport struct */ | |
677 | ||
e399441d JS |
678 | newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), |
679 | GFP_KERNEL); | |
680 | if (!newrec) { | |
681 | ret = -ENOMEM; | |
2b632970 | 682 | goto out_lport_put; |
e399441d JS |
683 | } |
684 | ||
685 | idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL); | |
686 | if (idx < 0) { | |
687 | ret = -ENOSPC; | |
2b632970 | 688 | goto out_kfree_rport; |
e399441d JS |
689 | } |
690 | ||
691 | INIT_LIST_HEAD(&newrec->endp_list); | |
692 | INIT_LIST_HEAD(&newrec->ctrl_list); | |
c913a8b0 | 693 | INIT_LIST_HEAD(&newrec->ls_req_list); |
e399441d | 694 | kref_init(&newrec->ref); |
158bfb88 | 695 | atomic_set(&newrec->act_ctrl_cnt, 0); |
e399441d JS |
696 | spin_lock_init(&newrec->lock); |
697 | newrec->remoteport.localport = &lport->localport; | |
c913a8b0 JS |
698 | newrec->dev = lport->dev; |
699 | newrec->lport = lport; | |
e399441d JS |
700 | newrec->remoteport.private = &newrec[1]; |
701 | newrec->remoteport.port_role = pinfo->port_role; | |
702 | newrec->remoteport.node_name = pinfo->node_name; | |
703 | newrec->remoteport.port_name = pinfo->port_name; | |
704 | newrec->remoteport.port_id = pinfo->port_id; | |
705 | newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; | |
706 | newrec->remoteport.port_num = idx; | |
2b632970 | 707 | __nvme_fc_set_dev_loss_tmo(newrec, pinfo); |
e399441d JS |
708 | |
709 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
710 | list_add_tail(&newrec->endp_list, &lport->endp_list); | |
711 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
712 | ||
eaefd5ab JS |
713 | nvme_fc_signal_discovery_scan(lport, newrec); |
714 | ||
e399441d JS |
715 | *portptr = &newrec->remoteport; |
716 | return 0; | |
717 | ||
e399441d JS |
718 | out_kfree_rport: |
719 | kfree(newrec); | |
2b632970 JS |
720 | out_lport_put: |
721 | nvme_fc_lport_put(lport); | |
e399441d JS |
722 | out_reghost_failed: |
723 | *portptr = NULL; | |
724 | return ret; | |
e399441d JS |
725 | } |
726 | EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport); | |
727 | ||
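For illustration, an LLDD that discovers an NVMe-capable target port would typically fill a struct nvme_fc_port_info and hand it, along with its previously registered localport, to this routine. A minimal sketch, assuming the role flags from nvme-fc-driver.h (the example_* name and the parameter values are hypothetical):

	static int example_register_remoteport(struct nvme_fc_local_port *localport,
					       u64 wwnn, u64 wwpn, u32 d_id,
					       struct nvme_fc_remote_port **rport)
	{
		struct nvme_fc_port_info pinfo = {
			.node_name    = wwnn,
			.port_name    = wwpn,
			.port_role    = FC_PORT_ROLE_NVME_TARGET |
					FC_PORT_ROLE_NVME_DISCOVERY,
			.port_id      = d_id,
			.dev_loss_tmo = 0,	/* 0: fall back to NVME_FC_DEFAULT_DEV_LOSS_TMO */
		};

		return nvme_fc_register_remoteport(localport, &pinfo, rport);
	}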
8d64daf7 JS |
728 | static int |
729 | nvme_fc_abort_lsops(struct nvme_fc_rport *rport) | |
730 | { | |
731 | struct nvmefc_ls_req_op *lsop; | |
732 | unsigned long flags; | |
733 | ||
734 | restart: | |
735 | spin_lock_irqsave(&rport->lock, flags); | |
736 | ||
737 | list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { | |
738 | if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { | |
739 | lsop->flags |= FCOP_FLAGS_TERMIO; | |
740 | spin_unlock_irqrestore(&rport->lock, flags); | |
741 | rport->lport->ops->ls_abort(&rport->lport->localport, | |
742 | &rport->remoteport, | |
743 | &lsop->ls_req); | |
744 | goto restart; | |
745 | } | |
746 | } | |
747 | spin_unlock_irqrestore(&rport->lock, flags); | |
748 | ||
749 | return 0; | |
750 | } | |
751 | ||
2b632970 JS |
752 | static void |
753 | nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) | |
754 | { | |
755 | dev_info(ctrl->ctrl.device, | |
756 | "NVME-FC{%d}: controller connectivity lost. Awaiting " | |
757 | "Reconnect", ctrl->cnum); | |
758 | ||
759 | switch (ctrl->ctrl.state) { | |
760 | case NVME_CTRL_NEW: | |
761 | case NVME_CTRL_LIVE: | |
762 | /* | |
763 | * Schedule a controller reset. The reset will terminate the | |
764 | * association and schedule the reconnect timer. Reconnects | |
765 | * will be attempted until either the ctlr_loss_tmo | |
766 | * (max_retries * connect_delay) expires or the remoteport's | |
767 | * dev_loss_tmo expires. | |
768 | */ | |
769 | if (nvme_reset_ctrl(&ctrl->ctrl)) { | |
770 | dev_warn(ctrl->ctrl.device, | |
771 | "NVME-FC{%d}: Couldn't schedule reset. " | |
772 | "Deleting controller.\n", | |
773 | ctrl->cnum); | |
774 | nvme_delete_ctrl(&ctrl->ctrl); | |
775 | } | |
776 | break; | |
777 | ||
ad6a0a52 | 778 | case NVME_CTRL_CONNECTING: |
2b632970 JS |
779 | /* |
780 | * The association has already been terminated and the | |
781 | * controller is attempting reconnects. No need to do anything | |
782 | * further. Reconnects will be attempted until either the | |
783 | * ctlr_loss_tmo (max_retries * connect_delay) expires or the | |
784 | * remoteport's dev_loss_tmo expires. | |
785 | */ | |
786 | break; | |
787 | ||
788 | case NVME_CTRL_RESETTING: | |
789 | /* | |
790 | * Controller is already in the process of terminating the | |
791 | * association. No need to do anything further. The reconnect | |
792 | * step will kick in naturally after the association is | |
793 | * terminated. | |
794 | */ | |
795 | break; | |
796 | ||
797 | case NVME_CTRL_DELETING: | |
798 | default: | |
799 | /* no action to take - let it delete */ | |
800 | break; | |
801 | } | |
802 | } | |
803 | ||
e399441d JS |
804 | /** |
805 | * nvme_fc_unregister_remoteport - transport entry point called by an | |
806 | * LLDD to deregister/remove a previously | |
807 | * registered NVME subsystem FC port. | |
808 | * @remoteport: pointer to the (registered) remote port that is to be | |
809 | * deregistered. | |
810 | * | |
811 | * Returns: | |
812 | * a completion status. Must be 0 upon success; a negative errno | |
813 | * (ex: -ENXIO) upon failure. | |
814 | */ | |
815 | int | |
816 | nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr) | |
817 | { | |
818 | struct nvme_fc_rport *rport = remoteport_to_rport(portptr); | |
819 | struct nvme_fc_ctrl *ctrl; | |
820 | unsigned long flags; | |
821 | ||
822 | if (!portptr) | |
823 | return -EINVAL; | |
824 | ||
825 | spin_lock_irqsave(&rport->lock, flags); | |
826 | ||
827 | if (portptr->port_state != FC_OBJSTATE_ONLINE) { | |
828 | spin_unlock_irqrestore(&rport->lock, flags); | |
829 | return -EINVAL; | |
830 | } | |
831 | portptr->port_state = FC_OBJSTATE_DELETED; | |
832 | ||
2b632970 JS |
833 | rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); |
834 | ||
835 | list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { | |
836 | /* if dev_loss_tmo==0, dev loss is immediate */ | |
837 | if (!portptr->dev_loss_tmo) { | |
838 | dev_warn(ctrl->ctrl.device, | |
839 | "NVME-FC{%d}: controller connectivity lost. " | |
840 | "Deleting controller.\n", | |
841 | ctrl->cnum); | |
842 | nvme_delete_ctrl(&ctrl->ctrl); | |
843 | } else | |
844 | nvme_fc_ctrl_connectivity_loss(ctrl); | |
845 | } | |
e399441d JS |
846 | |
847 | spin_unlock_irqrestore(&rport->lock, flags); | |
848 | ||
8d64daf7 JS |
849 | nvme_fc_abort_lsops(rport); |
850 | ||
158bfb88 JS |
851 | if (atomic_read(&rport->act_ctrl_cnt) == 0) |
852 | rport->lport->ops->remoteport_delete(portptr); | |
853 | ||
2b632970 JS |
854 | /* |
855 | * release the reference, which will allow the rport to be | |
856 | * torn down once all controllers go away (which should only | |
857 | * occur after dev_loss_tmo expires). | |
858 | */ | |
e399441d | 859 | nvme_fc_rport_put(rport); |
2b632970 | 860 | |
e399441d JS |
861 | return 0; |
862 | } | |
863 | EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport); | |
864 | ||
eaefd5ab JS |
865 | /** |
866 | * nvme_fc_rescan_remoteport - transport entry point called by an | |
867 | * LLDD to request a nvme device rescan. | |
868 | * @remoteport: pointer to the (registered) remote port that is to be | |
869 | * rescanned. | |
870 | * | |
871 | * Returns: N/A | |
872 | */ | |
873 | void | |
874 | nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport) | |
875 | { | |
876 | struct nvme_fc_rport *rport = remoteport_to_rport(remoteport); | |
877 | ||
878 | nvme_fc_signal_discovery_scan(rport->lport, rport); | |
879 | } | |
880 | EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport); | |
881 | ||
ac7fe82b JS |
882 | int |
883 | nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr, | |
884 | u32 dev_loss_tmo) | |
885 | { | |
886 | struct nvme_fc_rport *rport = remoteport_to_rport(portptr); | |
ac7fe82b JS |
887 | unsigned long flags; |
888 | ||
889 | spin_lock_irqsave(&rport->lock, flags); | |
890 | ||
891 | if (portptr->port_state != FC_OBJSTATE_ONLINE) { | |
892 | spin_unlock_irqrestore(&rport->lock, flags); | |
893 | return -EINVAL; | |
894 | } | |
895 | ||
896 | /* a dev_loss_tmo of 0 (immediate) is allowed to be set */ | |
897 | rport->remoteport.dev_loss_tmo = dev_loss_tmo; | |
898 | ||
899 | spin_unlock_irqrestore(&rport->lock, flags); | |
900 | ||
901 | return 0; | |
902 | } | |
903 | EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss); | |
904 | ||
e399441d JS |
905 | |
906 | /* *********************** FC-NVME DMA Handling **************************** */ | |
907 | ||
908 | /* | |
909 | * The fcloop device passes in a NULL device pointer. Real LLD's will | |
910 | * pass in a valid device pointer. If NULL is passed to the dma mapping | |
911 | * routines, depending on the platform, it may or may not succeed, and | |
912 | * may crash. | |
913 | * | |
914 | * As such: | |
915 | * Wrap all the dma routines and check the dev pointer. | |
916 | * | |
917 | * For simple mappings (those that return just a dma address), we'll | |
918 | * noop them, returning a dma address of 0. | |
919 | * | |
920 | * On more complex mappings (dma_map_sg), a pseudo routine fills | |
921 | * in the scatter list, setting all dma addresses to 0. | |
922 | */ | |
923 | ||
924 | static inline dma_addr_t | |
925 | fc_dma_map_single(struct device *dev, void *ptr, size_t size, | |
926 | enum dma_data_direction dir) | |
927 | { | |
928 | return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; | |
929 | } | |
930 | ||
931 | static inline int | |
932 | fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |
933 | { | |
934 | return dev ? dma_mapping_error(dev, dma_addr) : 0; | |
935 | } | |
936 | ||
937 | static inline void | |
938 | fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, | |
939 | enum dma_data_direction dir) | |
940 | { | |
941 | if (dev) | |
942 | dma_unmap_single(dev, addr, size, dir); | |
943 | } | |
944 | ||
945 | static inline void | |
946 | fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, | |
947 | enum dma_data_direction dir) | |
948 | { | |
949 | if (dev) | |
950 | dma_sync_single_for_cpu(dev, addr, size, dir); | |
951 | } | |
952 | ||
953 | static inline void | |
954 | fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, | |
955 | enum dma_data_direction dir) | |
956 | { | |
957 | if (dev) | |
958 | dma_sync_single_for_device(dev, addr, size, dir); | |
959 | } | |
960 | ||
961 | /* pseudo dma_map_sg call */ | |
962 | static int | |
963 | fc_map_sg(struct scatterlist *sg, int nents) | |
964 | { | |
965 | struct scatterlist *s; | |
966 | int i; | |
967 | ||
968 | WARN_ON(nents == 0 || sg[0].length == 0); | |
969 | ||
970 | for_each_sg(sg, s, nents, i) { | |
971 | s->dma_address = 0L; | |
972 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | |
973 | s->dma_length = s->length; | |
974 | #endif | |
975 | } | |
976 | return nents; | |
977 | } | |
978 | ||
979 | static inline int | |
980 | fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
981 | enum dma_data_direction dir) | |
982 | { | |
983 | return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); | |
984 | } | |
985 | ||
986 | static inline void | |
987 | fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |
988 | enum dma_data_direction dir) | |
989 | { | |
990 | if (dev) | |
991 | dma_unmap_sg(dev, sg, nents, dir); | |
992 | } | |
993 | ||
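Taken together, these wrappers let callers keep the usual map/check/use/unmap flow whether or not the LLDD supplied a real device; with a NULL dev (as fcloop passes), every call degrades to a harmless no-op. A minimal sketch of the calling pattern, mirroring the LS request path further below:

	dma_addr_t dma;

	dma = fc_dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(dev, dma))
		return -EFAULT;
	/* ... hand dma to the LLDD and wait for the operation to complete ... */
	fc_dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);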
e399441d JS |
994 | /* *********************** FC-NVME LS Handling **************************** */ |
995 | ||
996 | static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *); | |
997 | static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *); | |
998 | ||
999 | ||
1000 | static void | |
c913a8b0 | 1001 | __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop) |
e399441d | 1002 | { |
c913a8b0 | 1003 | struct nvme_fc_rport *rport = lsop->rport; |
e399441d JS |
1004 | struct nvmefc_ls_req *lsreq = &lsop->ls_req; |
1005 | unsigned long flags; | |
1006 | ||
c913a8b0 | 1007 | spin_lock_irqsave(&rport->lock, flags); |
e399441d JS |
1008 | |
1009 | if (!lsop->req_queued) { | |
c913a8b0 | 1010 | spin_unlock_irqrestore(&rport->lock, flags); |
e399441d JS |
1011 | return; |
1012 | } | |
1013 | ||
1014 | list_del(&lsop->lsreq_list); | |
1015 | ||
1016 | lsop->req_queued = false; | |
1017 | ||
c913a8b0 | 1018 | spin_unlock_irqrestore(&rport->lock, flags); |
e399441d | 1019 | |
c913a8b0 | 1020 | fc_dma_unmap_single(rport->dev, lsreq->rqstdma, |
e399441d JS |
1021 | (lsreq->rqstlen + lsreq->rsplen), |
1022 | DMA_BIDIRECTIONAL); | |
1023 | ||
c913a8b0 | 1024 | nvme_fc_rport_put(rport); |
e399441d JS |
1025 | } |
1026 | ||
1027 | static int | |
c913a8b0 | 1028 | __nvme_fc_send_ls_req(struct nvme_fc_rport *rport, |
e399441d JS |
1029 | struct nvmefc_ls_req_op *lsop, |
1030 | void (*done)(struct nvmefc_ls_req *req, int status)) | |
1031 | { | |
1032 | struct nvmefc_ls_req *lsreq = &lsop->ls_req; | |
1033 | unsigned long flags; | |
c913a8b0 | 1034 | int ret = 0; |
e399441d | 1035 | |
c913a8b0 JS |
1036 | if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) |
1037 | return -ECONNREFUSED; | |
1038 | ||
1039 | if (!nvme_fc_rport_get(rport)) | |
e399441d JS |
1040 | return -ESHUTDOWN; |
1041 | ||
1042 | lsreq->done = done; | |
c913a8b0 | 1043 | lsop->rport = rport; |
e399441d JS |
1044 | lsop->req_queued = false; |
1045 | INIT_LIST_HEAD(&lsop->lsreq_list); | |
1046 | init_completion(&lsop->ls_done); | |
1047 | ||
c913a8b0 | 1048 | lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, |
e399441d JS |
1049 | lsreq->rqstlen + lsreq->rsplen, |
1050 | DMA_BIDIRECTIONAL); | |
c913a8b0 JS |
1051 | if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { |
1052 | ret = -EFAULT; | |
1053 | goto out_putrport; | |
e399441d JS |
1054 | } |
1055 | lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; | |
1056 | ||
c913a8b0 | 1057 | spin_lock_irqsave(&rport->lock, flags); |
e399441d | 1058 | |
c913a8b0 | 1059 | list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); |
e399441d JS |
1060 | |
1061 | lsop->req_queued = true; | |
1062 | ||
c913a8b0 | 1063 | spin_unlock_irqrestore(&rport->lock, flags); |
e399441d | 1064 | |
c913a8b0 JS |
1065 | ret = rport->lport->ops->ls_req(&rport->lport->localport, |
1066 | &rport->remoteport, lsreq); | |
e399441d | 1067 | if (ret) |
c913a8b0 JS |
1068 | goto out_unlink; |
1069 | ||
1070 | return 0; | |
1071 | ||
1072 | out_unlink: | |
1073 | lsop->ls_error = ret; | |
1074 | spin_lock_irqsave(&rport->lock, flags); | |
1075 | lsop->req_queued = false; | |
1076 | list_del(&lsop->lsreq_list); | |
1077 | spin_unlock_irqrestore(&rport->lock, flags); | |
1078 | fc_dma_unmap_single(rport->dev, lsreq->rqstdma, | |
1079 | (lsreq->rqstlen + lsreq->rsplen), | |
1080 | DMA_BIDIRECTIONAL); | |
1081 | out_putrport: | |
1082 | nvme_fc_rport_put(rport); | |
e399441d JS |
1083 | |
1084 | return ret; | |
1085 | } | |
1086 | ||
1087 | static void | |
1088 | nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status) | |
1089 | { | |
1090 | struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); | |
1091 | ||
1092 | lsop->ls_error = status; | |
1093 | complete(&lsop->ls_done); | |
1094 | } | |
1095 | ||
1096 | static int | |
c913a8b0 | 1097 | nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop) |
e399441d JS |
1098 | { |
1099 | struct nvmefc_ls_req *lsreq = &lsop->ls_req; | |
1100 | struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; | |
1101 | int ret; | |
1102 | ||
c913a8b0 | 1103 | ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done); |
e399441d | 1104 | |
c913a8b0 | 1105 | if (!ret) { |
e399441d JS |
1106 | /* |
1107 | * No timeout/not interruptible as we need the struct | |
1108 | * to exist until the lldd calls us back. Thus mandate | |
1109 | * wait until driver calls back. lldd responsible for | |
1110 | * the timeout action | |
1111 | */ | |
1112 | wait_for_completion(&lsop->ls_done); | |
1113 | ||
c913a8b0 | 1114 | __nvme_fc_finish_ls_req(lsop); |
e399441d | 1115 | |
c913a8b0 | 1116 | ret = lsop->ls_error; |
e399441d JS |
1117 | } |
1118 | ||
c913a8b0 JS |
1119 | if (ret) |
1120 | return ret; | |
1121 | ||
e399441d JS |
1122 | /* ACC or RJT payload ? */ |
1123 | if (rjt->w0.ls_cmd == FCNVME_LS_RJT) | |
1124 | return -ENXIO; | |
1125 | ||
1126 | return 0; | |
1127 | } | |
1128 | ||
c913a8b0 JS |
1129 | static int |
1130 | nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport, | |
e399441d JS |
1131 | struct nvmefc_ls_req_op *lsop, |
1132 | void (*done)(struct nvmefc_ls_req *req, int status)) | |
1133 | { | |
e399441d JS |
1134 | /* don't wait for completion */ |
1135 | ||
c913a8b0 | 1136 | return __nvme_fc_send_ls_req(rport, lsop, done); |
e399441d JS |
1137 | } |
1138 | ||
1139 | /* Validation Error indexes into the string table below */ | |
1140 | enum { | |
1141 | VERR_NO_ERROR = 0, | |
1142 | VERR_LSACC = 1, | |
1143 | VERR_LSDESC_RQST = 2, | |
1144 | VERR_LSDESC_RQST_LEN = 3, | |
1145 | VERR_ASSOC_ID = 4, | |
1146 | VERR_ASSOC_ID_LEN = 5, | |
1147 | VERR_CONN_ID = 6, | |
1148 | VERR_CONN_ID_LEN = 7, | |
1149 | VERR_CR_ASSOC = 8, | |
1150 | VERR_CR_ASSOC_ACC_LEN = 9, | |
1151 | VERR_CR_CONN = 10, | |
1152 | VERR_CR_CONN_ACC_LEN = 11, | |
1153 | VERR_DISCONN = 12, | |
1154 | VERR_DISCONN_ACC_LEN = 13, | |
1155 | }; | |
1156 | ||
1157 | static char *validation_errors[] = { | |
1158 | "OK", | |
1159 | "Not LS_ACC", | |
1160 | "Not LSDESC_RQST", | |
1161 | "Bad LSDESC_RQST Length", | |
1162 | "Not Association ID", | |
1163 | "Bad Association ID Length", | |
1164 | "Not Connection ID", | |
1165 | "Bad Connection ID Length", | |
1166 | "Not CR_ASSOC Rqst", | |
1167 | "Bad CR_ASSOC ACC Length", | |
1168 | "Not CR_CONN Rqst", | |
1169 | "Bad CR_CONN ACC Length", | |
1170 | "Not Disconnect Rqst", | |
1171 | "Bad Disconnect ACC Length", | |
1172 | }; | |
1173 | ||
1174 | static int | |
1175 | nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, | |
1176 | struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) | |
1177 | { | |
1178 | struct nvmefc_ls_req_op *lsop; | |
1179 | struct nvmefc_ls_req *lsreq; | |
1180 | struct fcnvme_ls_cr_assoc_rqst *assoc_rqst; | |
1181 | struct fcnvme_ls_cr_assoc_acc *assoc_acc; | |
1182 | int ret, fcret = 0; | |
1183 | ||
1184 | lsop = kzalloc((sizeof(*lsop) + | |
1185 | ctrl->lport->ops->lsrqst_priv_sz + | |
1186 | sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL); | |
1187 | if (!lsop) { | |
1188 | ret = -ENOMEM; | |
1189 | goto out_no_memory; | |
1190 | } | |
1191 | lsreq = &lsop->ls_req; | |
1192 | ||
1193 | lsreq->private = (void *)&lsop[1]; | |
1194 | assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *) | |
1195 | (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz); | |
1196 | assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1]; | |
1197 | ||
1198 | assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; | |
1199 | assoc_rqst->desc_list_len = | |
1200 | cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); | |
1201 | ||
1202 | assoc_rqst->assoc_cmd.desc_tag = | |
1203 | cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD); | |
1204 | assoc_rqst->assoc_cmd.desc_len = | |
1205 | fcnvme_lsdesc_len( | |
1206 | sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); | |
1207 | ||
1208 | assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); | |
d157e534 | 1209 | assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); |
e399441d JS |
1210 | /* Linux supports only Dynamic controllers */ |
1211 | assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); | |
8e412263 | 1212 | uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); |
e399441d JS |
1213 | strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, |
1214 | min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); | |
1215 | strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, | |
1216 | min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); | |
1217 | ||
1218 | lsop->queue = queue; | |
1219 | lsreq->rqstaddr = assoc_rqst; | |
1220 | lsreq->rqstlen = sizeof(*assoc_rqst); | |
1221 | lsreq->rspaddr = assoc_acc; | |
1222 | lsreq->rsplen = sizeof(*assoc_acc); | |
1223 | lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC; | |
1224 | ||
c913a8b0 | 1225 | ret = nvme_fc_send_ls_req(ctrl->rport, lsop); |
e399441d JS |
1226 | if (ret) |
1227 | goto out_free_buffer; | |
1228 | ||
1229 | /* process connect LS completion */ | |
1230 | ||
1231 | /* validate the ACC response */ | |
1232 | if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) | |
1233 | fcret = VERR_LSACC; | |
f77fc87c | 1234 | else if (assoc_acc->hdr.desc_list_len != |
e399441d JS |
1235 | fcnvme_lsdesc_len( |
1236 | sizeof(struct fcnvme_ls_cr_assoc_acc))) | |
1237 | fcret = VERR_CR_ASSOC_ACC_LEN; | |
f77fc87c JS |
1238 | else if (assoc_acc->hdr.rqst.desc_tag != |
1239 | cpu_to_be32(FCNVME_LSDESC_RQST)) | |
e399441d JS |
1240 | fcret = VERR_LSDESC_RQST; |
1241 | else if (assoc_acc->hdr.rqst.desc_len != | |
1242 | fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) | |
1243 | fcret = VERR_LSDESC_RQST_LEN; | |
1244 | else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) | |
1245 | fcret = VERR_CR_ASSOC; | |
1246 | else if (assoc_acc->associd.desc_tag != | |
1247 | cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) | |
1248 | fcret = VERR_ASSOC_ID; | |
1249 | else if (assoc_acc->associd.desc_len != | |
1250 | fcnvme_lsdesc_len( | |
1251 | sizeof(struct fcnvme_lsdesc_assoc_id))) | |
1252 | fcret = VERR_ASSOC_ID_LEN; | |
1253 | else if (assoc_acc->connectid.desc_tag != | |
1254 | cpu_to_be32(FCNVME_LSDESC_CONN_ID)) | |
1255 | fcret = VERR_CONN_ID; | |
1256 | else if (assoc_acc->connectid.desc_len != | |
1257 | fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) | |
1258 | fcret = VERR_CONN_ID_LEN; | |
1259 | ||
1260 | if (fcret) { | |
1261 | ret = -EBADF; | |
1262 | dev_err(ctrl->dev, | |
1263 | "q %d connect failed: %s\n", | |
1264 | queue->qnum, validation_errors[fcret]); | |
1265 | } else { | |
1266 | ctrl->association_id = | |
1267 | be64_to_cpu(assoc_acc->associd.association_id); | |
1268 | queue->connection_id = | |
1269 | be64_to_cpu(assoc_acc->connectid.connection_id); | |
1270 | set_bit(NVME_FC_Q_CONNECTED, &queue->flags); | |
1271 | } | |
1272 | ||
1273 | out_free_buffer: | |
1274 | kfree(lsop); | |
1275 | out_no_memory: | |
1276 | if (ret) | |
1277 | dev_err(ctrl->dev, | |
1278 | "queue %d connect admin queue failed (%d).\n", | |
1279 | queue->qnum, ret); | |
1280 | return ret; | |
1281 | } | |
1282 | ||
1283 | static int | |
1284 | nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |
1285 | u16 qsize, u16 ersp_ratio) | |
1286 | { | |
1287 | struct nvmefc_ls_req_op *lsop; | |
1288 | struct nvmefc_ls_req *lsreq; | |
1289 | struct fcnvme_ls_cr_conn_rqst *conn_rqst; | |
1290 | struct fcnvme_ls_cr_conn_acc *conn_acc; | |
1291 | int ret, fcret = 0; | |
1292 | ||
1293 | lsop = kzalloc((sizeof(*lsop) + | |
1294 | ctrl->lport->ops->lsrqst_priv_sz + | |
1295 | sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL); | |
1296 | if (!lsop) { | |
1297 | ret = -ENOMEM; | |
1298 | goto out_no_memory; | |
1299 | } | |
1300 | lsreq = &lsop->ls_req; | |
1301 | ||
1302 | lsreq->private = (void *)&lsop[1]; | |
1303 | conn_rqst = (struct fcnvme_ls_cr_conn_rqst *) | |
1304 | (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz); | |
1305 | conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1]; | |
1306 | ||
1307 | conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; | |
1308 | conn_rqst->desc_list_len = cpu_to_be32( | |
1309 | sizeof(struct fcnvme_lsdesc_assoc_id) + | |
1310 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); | |
1311 | ||
1312 | conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); | |
1313 | conn_rqst->associd.desc_len = | |
1314 | fcnvme_lsdesc_len( | |
1315 | sizeof(struct fcnvme_lsdesc_assoc_id)); | |
1316 | conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); | |
1317 | conn_rqst->connect_cmd.desc_tag = | |
1318 | cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD); | |
1319 | conn_rqst->connect_cmd.desc_len = | |
1320 | fcnvme_lsdesc_len( | |
1321 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); | |
1322 | conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); | |
1323 | conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); | |
d157e534 | 1324 | conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); |
e399441d JS |
1325 | |
1326 | lsop->queue = queue; | |
1327 | lsreq->rqstaddr = conn_rqst; | |
1328 | lsreq->rqstlen = sizeof(*conn_rqst); | |
1329 | lsreq->rspaddr = conn_acc; | |
1330 | lsreq->rsplen = sizeof(*conn_acc); | |
1331 | lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC; | |
1332 | ||
c913a8b0 | 1333 | ret = nvme_fc_send_ls_req(ctrl->rport, lsop); |
e399441d JS |
1334 | if (ret) |
1335 | goto out_free_buffer; | |
1336 | ||
1337 | /* process connect LS completion */ | |
1338 | ||
1339 | /* validate the ACC response */ | |
1340 | if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) | |
1341 | fcret = VERR_LSACC; | |
f77fc87c | 1342 | else if (conn_acc->hdr.desc_list_len != |
e399441d JS |
1343 | fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc))) |
1344 | fcret = VERR_CR_CONN_ACC_LEN; | |
f77fc87c | 1345 | else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) |
e399441d JS |
1346 | fcret = VERR_LSDESC_RQST; |
1347 | else if (conn_acc->hdr.rqst.desc_len != | |
1348 | fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) | |
1349 | fcret = VERR_LSDESC_RQST_LEN; | |
1350 | else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) | |
1351 | fcret = VERR_CR_CONN; | |
1352 | else if (conn_acc->connectid.desc_tag != | |
1353 | cpu_to_be32(FCNVME_LSDESC_CONN_ID)) | |
1354 | fcret = VERR_CONN_ID; | |
1355 | else if (conn_acc->connectid.desc_len != | |
1356 | fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) | |
1357 | fcret = VERR_CONN_ID_LEN; | |
1358 | ||
1359 | if (fcret) { | |
1360 | ret = -EBADF; | |
1361 | dev_err(ctrl->dev, | |
1362 | "q %d connect failed: %s\n", | |
1363 | queue->qnum, validation_errors[fcret]); | |
1364 | } else { | |
1365 | queue->connection_id = | |
1366 | be64_to_cpu(conn_acc->connectid.connection_id); | |
1367 | set_bit(NVME_FC_Q_CONNECTED, &queue->flags); | |
1368 | } | |
1369 | ||
1370 | out_free_buffer: | |
1371 | kfree(lsop); | |
1372 | out_no_memory: | |
1373 | if (ret) | |
1374 | dev_err(ctrl->dev, | |
1375 | "queue %d connect command failed (%d).\n", | |
1376 | queue->qnum, ret); | |
1377 | return ret; | |
1378 | } | |
1379 | ||
1380 | static void | |
1381 | nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) | |
1382 | { | |
1383 | struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); | |
e399441d | 1384 | |
c913a8b0 | 1385 | __nvme_fc_finish_ls_req(lsop); |
e399441d JS |
1386 | |
1387 | /* fc-nvme initiator doesn't care about success or failure of cmd */ | |
1388 | ||
1389 | kfree(lsop); | |
1390 | } | |
1391 | ||
1392 | /* | |
1393 | * This routine sends a FC-NVME LS to disconnect (aka terminate) | |
1394 | * the FC-NVME Association. Terminating the association also | |
1395 | * terminates the FC-NVME connections (per queue, both admin and io | |
1396 | * queues) that are part of the association. E.g. things are torn | |
1397 | * down, and the related FC-NVME Association ID and Connection IDs | |
1398 | * become invalid. | |
1399 | * | |
1400 | * The behavior of the fc-nvme initiator is such that its | |
1401 | * understanding of the association and connections will implicitly | |
1402 | * be torn down. The action is implicit as it may be due to a loss of | |
1403 | * connectivity with the fc-nvme target, so you may never get a | |
1404 | * response even if you tried. As such, the action of this routine | |
1405 | * is to asynchronously send the LS, ignore any results of the LS, and | |
1406 | * continue on with terminating the association. If the fc-nvme target | |
1407 | * is present and receives the LS, it too can tear down. | |
1408 | */ | |
1409 | static void | |
1410 | nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) | |
1411 | { | |
1412 | struct fcnvme_ls_disconnect_rqst *discon_rqst; | |
1413 | struct fcnvme_ls_disconnect_acc *discon_acc; | |
1414 | struct nvmefc_ls_req_op *lsop; | |
1415 | struct nvmefc_ls_req *lsreq; | |
c913a8b0 | 1416 | int ret; |
e399441d JS |
1417 | |
1418 | lsop = kzalloc((sizeof(*lsop) + | |
1419 | ctrl->lport->ops->lsrqst_priv_sz + | |
1420 | sizeof(*discon_rqst) + sizeof(*discon_acc)), | |
1421 | GFP_KERNEL); | |
1422 | if (!lsop) | |
1423 | /* couldn't send it... too bad */ | |
1424 | return; | |
1425 | ||
1426 | lsreq = &lsop->ls_req; | |
1427 | ||
1428 | lsreq->private = (void *)&lsop[1]; | |
1429 | discon_rqst = (struct fcnvme_ls_disconnect_rqst *) | |
1430 | (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz); | |
1431 | discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1]; | |
1432 | ||
1433 | discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT; | |
1434 | discon_rqst->desc_list_len = cpu_to_be32( | |
1435 | sizeof(struct fcnvme_lsdesc_assoc_id) + | |
1436 | sizeof(struct fcnvme_lsdesc_disconn_cmd)); | |
1437 | ||
1438 | discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); | |
1439 | discon_rqst->associd.desc_len = | |
1440 | fcnvme_lsdesc_len( | |
1441 | sizeof(struct fcnvme_lsdesc_assoc_id)); | |
1442 | ||
1443 | discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); | |
1444 | ||
1445 | discon_rqst->discon_cmd.desc_tag = cpu_to_be32( | |
1446 | FCNVME_LSDESC_DISCONN_CMD); | |
1447 | discon_rqst->discon_cmd.desc_len = | |
1448 | fcnvme_lsdesc_len( | |
1449 | sizeof(struct fcnvme_lsdesc_disconn_cmd)); | |
1450 | discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION; | |
1451 | discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id); | |
1452 | ||
1453 | lsreq->rqstaddr = discon_rqst; | |
1454 | lsreq->rqstlen = sizeof(*discon_rqst); | |
1455 | lsreq->rspaddr = discon_acc; | |
1456 | lsreq->rsplen = sizeof(*discon_acc); | |
1457 | lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC; | |
1458 | ||
c913a8b0 JS |
1459 | ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, |
1460 | nvme_fc_disconnect_assoc_done); | |
1461 | if (ret) | |
1462 | kfree(lsop); | |
e399441d JS |
1463 | |
1464 | /* only meaningful part to terminating the association */ | |
1465 | ctrl->association_id = 0; | |
1466 | } | |
1467 | ||
1468 | ||
1469 | /* *********************** NVME Ctrl Routines **************************** */ | |
1470 | ||
f874d5d0 | 1471 | static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); |
e399441d JS |
1472 | |
1473 | static int | |
1474 | nvme_fc_reinit_request(void *data, struct request *rq) | |
1475 | { | |
1476 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
1477 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; | |
1478 | ||
1479 | memset(cmdiu, 0, sizeof(*cmdiu)); | |
1480 | cmdiu->scsi_id = NVME_CMD_SCSI_ID; | |
1481 | cmdiu->fc_id = NVME_CMD_FC_ID; | |
1482 | cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); | |
1483 | memset(&op->rsp_iu, 0, sizeof(op->rsp_iu)); | |
1484 | ||
1485 | return 0; | |
1486 | } | |
1487 | ||
1488 | static void | |
1489 | __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, | |
1490 | struct nvme_fc_fcp_op *op) | |
1491 | { | |
1492 | fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, | |
1493 | sizeof(op->rsp_iu), DMA_FROM_DEVICE); | |
1494 | fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, | |
1495 | sizeof(op->cmd_iu), DMA_TO_DEVICE); | |
1496 | ||
1497 | atomic_set(&op->state, FCPOP_STATE_UNINIT); | |
1498 | } | |
1499 | ||
1500 | static void | |
d6296d39 CH |
1501 | nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, |
1502 | unsigned int hctx_idx) | |
e399441d JS |
1503 | { |
1504 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
1505 | ||
d6296d39 | 1506 | return __nvme_fc_exit_request(set->driver_data, op); |
e399441d JS |
1507 | } |
1508 | ||
78a7ac26 JS |
1509 | static int |
1510 | __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) | |
1511 | { | |
3efd6e8e JS |
1512 | unsigned long flags; |
1513 | int opstate; | |
1514 | ||
1515 | spin_lock_irqsave(&ctrl->lock, flags); | |
1516 | opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); | |
1517 | if (opstate != FCPOP_STATE_ACTIVE) | |
1518 | atomic_set(&op->state, opstate); | |
1519 | else if (ctrl->flags & FCCTRL_TERMIO) | |
1520 | ctrl->iocnt++; | |
1521 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
78a7ac26 | 1522 | |
3efd6e8e | 1523 | if (opstate != FCPOP_STATE_ACTIVE) |
78a7ac26 | 1524 | return -ECANCELED; |
78a7ac26 JS |
1525 | |
1526 | ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, | |
1527 | &ctrl->rport->remoteport, | |
1528 | op->queue->lldd_handle, | |
1529 | &op->fcp_req); | |
1530 | ||
1531 | return 0; | |
1532 | } | |
1533 | ||
e399441d | 1534 | static void |
78a7ac26 | 1535 | nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) |
e399441d JS |
1536 | { |
1537 | struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; | |
3efd6e8e | 1538 | int i; |
78a7ac26 | 1539 | |
3efd6e8e JS |
1540 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) |
1541 | __nvme_fc_abort_op(ctrl, aen_op); | |
e399441d JS |
1542 | } |
1543 | ||
c3aedd22 | 1544 | static inline void |
78a7ac26 | 1545 | __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, |
3efd6e8e | 1546 | struct nvme_fc_fcp_op *op, int opstate) |
78a7ac26 JS |
1547 | { |
1548 | unsigned long flags; | |
78a7ac26 | 1549 | |
c3aedd22 JS |
1550 | if (opstate == FCPOP_STATE_ABORTED) { |
1551 | spin_lock_irqsave(&ctrl->lock, flags); | |
1552 | if (ctrl->flags & FCCTRL_TERMIO) { | |
1553 | if (!--ctrl->iocnt) | |
1554 | wake_up(&ctrl->ioabort_wait); | |
1555 | } | |
1556 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
61bff8ef | 1557 | } |
78a7ac26 JS |
1558 | } |
1559 | ||
baee29ac | 1560 | static void |
e399441d JS |
1561 | nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) |
1562 | { | |
1563 | struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); | |
1564 | struct request *rq = op->rq; | |
1565 | struct nvmefc_fcp_req *freq = &op->fcp_req; | |
1566 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
1567 | struct nvme_fc_queue *queue = op->queue; | |
1568 | struct nvme_completion *cqe = &op->rsp_iu.cqe; | |
458f280d | 1569 | struct nvme_command *sqe = &op->cmd_iu.sqe; |
d663b69f | 1570 | __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); |
27fa9bc5 | 1571 | union nvme_result result; |
0a02e39f | 1572 | bool terminate_assoc = true; |
3efd6e8e | 1573 | int opstate; |
e399441d JS |
1574 | |
1575 | /* | |
1576 | * WARNING: | |
1577 | * The current linux implementation of an nvme controller | |
1578 | * allocates a single tag set for all io queues and sizes | |
1579 | * the io queues to fully hold all possible tags. Thus, the | |
1580 | * implementation does not reference or care about the sqhd | |
1581 | * value as it never needs to use the sqhd/sqtail pointers | |
1582 | * for submission pacing. | |
1583 | * | |
1584 | * This affects the FC-NVME implementation in two ways: | |
1585 | * 1) As the value doesn't matter, we don't need to waste | |
1586 | * cycles extracting it from ERSPs and stamping it in the | |
1587 | * cases where the transport fabricates CQEs on successful | |
1588 | * completions. | |
1589 | * 2) The FC-NVME implementation requires that ERSP completions | |
1590 | * be delivered back to the nvme layer in order | |
1591 | * relative to the rsn, such that the sqhd value will always | |
1592 | * be "in order" for the nvme layer. As the nvme layer in | |
1593 | * linux doesn't care about sqhd, there's no need to return | |
1594 | * them in order. | |
1595 | * | |
1596 | * Additionally: | |
1597 | * As the core nvme layer in linux currently does not look at | |
1598 | * every field in the cqe - in cases where the FC transport must | |
1599 | * fabricate a CQE, the following fields will not be set as they | |
1600 | * are not referenced: | |
1601 | * cqe.sqid, cqe.sqhd, cqe.command_id | |
f874d5d0 JS |
1602 | * |
1603 | * Failure or error of an individual i/o, in a transport | |
1604 | * detected fashion unrelated to the nvme completion status, | |
1605 | * can potentially cause the initiator and target sides to get out | |
1606 | * of sync on SQ head/tail (aka outstanding io count allowed). | |
1607 | * Per FC-NVME spec, failure of an individual command requires | |
1608 | * the connection to be terminated, which in turn requires the | |
1609 | * association to be terminated. | |
e399441d JS |
1610 | */ |
1611 | ||
3efd6e8e JS |
1612 | opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); |
1613 | ||
e399441d JS |
1614 | fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, |
1615 | sizeof(op->rsp_iu), DMA_FROM_DEVICE); | |
1616 | ||
3efd6e8e | 1617 | if (opstate == FCPOP_STATE_ABORTED) |
0a02e39f | 1618 | status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); |
62eeacb0 | 1619 | else if (freq->status) |
56b7103a | 1620 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
e399441d JS |
1621 | |
1622 | /* | |
1623 | * For the linux implementation, if we have an unsuccessful | |
1624 | * status, the blk-mq layer can typically be called with the | |
1625 | * non-zero status and the content of the cqe isn't important. | |
1626 | */ | |
1627 | if (status) | |
1628 | goto done; | |
1629 | ||
1630 | /* | |
1631 | * command completed successfully relative to the wire | |
1632 | * protocol. However, validate anything received and | |
1633 | * extract the status and result from the cqe (create it | |
1634 | * where necessary). | |
1635 | */ | |
1636 | ||
1637 | switch (freq->rcv_rsplen) { | |
1638 | ||
1639 | case 0: | |
1640 | case NVME_FC_SIZEOF_ZEROS_RSP: | |
1641 | /* | |
1642 | * No response payload, or 12 bytes of payload that | |
1643 | * should all be zeros, is treated as a successful completion | |
1644 | * and the transport fabricates the CQE (no payload to parse). | |
1645 | */ | |
1646 | if (freq->transferred_length != | |
1647 | be32_to_cpu(op->cmd_iu.data_len)) { | |
56b7103a | 1648 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
e399441d JS |
1649 | goto done; |
1650 | } | |
27fa9bc5 | 1651 | result.u64 = 0; |
e399441d JS |
1652 | break; |
1653 | ||
1654 | case sizeof(struct nvme_fc_ersp_iu): | |
1655 | /* | |
1656 | * The ERSP IU contains a full completion with CQE. | |
1657 | * Validate ERSP IU and look at cqe. | |
1658 | */ | |
1659 | if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != | |
1660 | (freq->rcv_rsplen / 4) || | |
1661 | be32_to_cpu(op->rsp_iu.xfrd_len) != | |
1662 | freq->transferred_length || | |
726a1080 | 1663 | op->rsp_iu.status_code || |
458f280d | 1664 | sqe->common.command_id != cqe->command_id)) { |
56b7103a | 1665 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
e399441d JS |
1666 | goto done; |
1667 | } | |
27fa9bc5 | 1668 | result = cqe->result; |
d663b69f | 1669 | status = cqe->status; |
e399441d JS |
1670 | break; |
1671 | ||
1672 | default: | |
56b7103a | 1673 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
e399441d JS |
1674 | goto done; |
1675 | } | |
1676 | ||
f874d5d0 JS |
1677 | terminate_assoc = false; |
1678 | ||
e399441d | 1679 | done: |
78a7ac26 | 1680 | if (op->flags & FCOP_FLAGS_AEN) { |
27fa9bc5 | 1681 | nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); |
3efd6e8e | 1682 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); |
78a7ac26 JS |
1683 | atomic_set(&op->state, FCPOP_STATE_IDLE); |
1684 | op->flags = FCOP_FLAGS_AEN; /* clear other flags */ | |
e399441d | 1685 | nvme_fc_ctrl_put(ctrl); |
f874d5d0 | 1686 | goto check_error; |
e399441d JS |
1687 | } |
1688 | ||
0a02e39f JS |
1689 | /* |
1690 | * Force failures of commands if we're killing the controller | |
1691 | * or have an error on a command used to create a new association | |
1692 | */ | |
1693 | if (status && | |
1694 | (blk_queue_dying(rq->q) || | |
1695 | ctrl->ctrl.state == NVME_CTRL_NEW || | |
ad6a0a52 | 1696 | ctrl->ctrl.state == NVME_CTRL_CONNECTING)) |
0a02e39f JS |
1697 | status |= cpu_to_le16(NVME_SC_DNR << 1); |
1698 | ||
c3aedd22 JS |
1699 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); |
1700 | nvme_end_request(rq, status, result); | |
f874d5d0 JS |
1701 | |
1702 | check_error: | |
1703 | if (terminate_assoc) | |
1704 | nvme_fc_error_recovery(ctrl, "transport detected io error"); | |
e399441d JS |
1705 | } |
1706 | ||
1707 | static int | |
1708 | __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, | |
1709 | struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, | |
1710 | struct request *rq, u32 rqno) | |
1711 | { | |
1712 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; | |
1713 | int ret = 0; | |
1714 | ||
1715 | memset(op, 0, sizeof(*op)); | |
1716 | op->fcp_req.cmdaddr = &op->cmd_iu; | |
1717 | op->fcp_req.cmdlen = sizeof(op->cmd_iu); | |
1718 | op->fcp_req.rspaddr = &op->rsp_iu; | |
1719 | op->fcp_req.rsplen = sizeof(op->rsp_iu); | |
1720 | op->fcp_req.done = nvme_fc_fcpio_done; | |
1721 | op->fcp_req.first_sgl = (struct scatterlist *)&op[1]; | |
1722 | op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE]; | |
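	/*
	 * Layout note (matches the cmd_size used when the tag sets are
	 * allocated later in this file): the blk-mq pdu is the op itself,
	 * immediately followed by SG_CHUNK_SIZE scatterlist entries, and
	 * then the LLDD's fcprqst_priv_sz private area - which is what the
	 * two pointer fixups above rely on.
	 */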
1723 | op->ctrl = ctrl; | |
1724 | op->queue = queue; | |
1725 | op->rq = rq; | |
1726 | op->rqno = rqno; | |
1727 | ||
1728 | cmdiu->scsi_id = NVME_CMD_SCSI_ID; | |
1729 | cmdiu->fc_id = NVME_CMD_FC_ID; | |
1730 | cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); | |
1731 | ||
1732 | op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, | |
1733 | &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); | |
1734 | if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { | |
1735 | dev_err(ctrl->dev, | |
1736 | "FCP Op failed - cmdiu dma mapping failed.\n"); | |
1737 | ret = -EFAULT; | |
1738 | goto out_on_error; | |
1739 | } | |
1740 | ||
1741 | op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, | |
1742 | &op->rsp_iu, sizeof(op->rsp_iu), | |
1743 | DMA_FROM_DEVICE); | |
1744 | if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { | |
1745 | dev_err(ctrl->dev, | |
1746 | "FCP Op failed - rspiu dma mapping failed.\n"); | |
1747 | ret = -EFAULT; | |
1748 | } | |
1749 | ||
1750 | atomic_set(&op->state, FCPOP_STATE_IDLE); | |
1751 | out_on_error: | |
1752 | return ret; | |
1753 | } | |
1754 | ||
1755 | static int | |
d6296d39 CH |
1756 | nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, |
1757 | unsigned int hctx_idx, unsigned int numa_node) | |
e399441d | 1758 | { |
d6296d39 | 1759 | struct nvme_fc_ctrl *ctrl = set->driver_data; |
e399441d | 1760 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); |
76f983cb CH |
1761 | int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; |
1762 | struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; | |
e399441d JS |
1763 | |
1764 | return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++); | |
1765 | } | |
1766 | ||
1767 | static int | |
1768 | nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) | |
1769 | { | |
1770 | struct nvme_fc_fcp_op *aen_op; | |
1771 | struct nvme_fc_cmd_iu *cmdiu; | |
1772 | struct nvme_command *sqe; | |
61bff8ef | 1773 | void *private; |
e399441d JS |
1774 | int i, ret; |
1775 | ||
1776 | aen_op = ctrl->aen_ops; | |
38dabe21 | 1777 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { |
61bff8ef JS |
1778 | private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, |
1779 | GFP_KERNEL); | |
1780 | if (!private) | |
1781 | return -ENOMEM; | |
1782 | ||
e399441d JS |
1783 | cmdiu = &aen_op->cmd_iu; |
1784 | sqe = &cmdiu->sqe; | |
1785 | ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], | |
1786 | aen_op, (struct request *)NULL, | |
38dabe21 | 1787 | (NVME_AQ_BLK_MQ_DEPTH + i)); |
61bff8ef JS |
1788 | if (ret) { |
1789 | kfree(private); | |
e399441d | 1790 | return ret; |
61bff8ef | 1791 | } |
e399441d | 1792 | |
78a7ac26 | 1793 | aen_op->flags = FCOP_FLAGS_AEN; |
61bff8ef JS |
1794 | aen_op->fcp_req.first_sgl = NULL; /* no sg list */ |
1795 | aen_op->fcp_req.private = private; | |
78a7ac26 | 1796 | |
e399441d JS |
1797 | memset(sqe, 0, sizeof(*sqe)); |
1798 | sqe->common.opcode = nvme_admin_async_event; | |
78a7ac26 | 1799 | /* Note: core layer may overwrite the sqe.command_id value */ |
38dabe21 | 1800 | sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; |
e399441d JS |
1801 | } |
1802 | return 0; | |
1803 | } | |
1804 | ||
61bff8ef JS |
1805 | static void |
1806 | nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) | |
1807 | { | |
1808 | struct nvme_fc_fcp_op *aen_op; | |
1809 | int i; | |
1810 | ||
1811 | aen_op = ctrl->aen_ops; | |
38dabe21 | 1812 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { |
61bff8ef JS |
1813 | if (!aen_op->fcp_req.private) |
1814 | continue; | |
1815 | ||
1816 | __nvme_fc_exit_request(ctrl, aen_op); | |
1817 | ||
1818 | kfree(aen_op->fcp_req.private); | |
1819 | aen_op->fcp_req.private = NULL; | |
1820 | } | |
1821 | } | |
e399441d JS |
1822 | |
1823 | static inline void | |
1824 | __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, | |
1825 | unsigned int qidx) | |
1826 | { | |
1827 | struct nvme_fc_queue *queue = &ctrl->queues[qidx]; | |
1828 | ||
1829 | hctx->driver_data = queue; | |
1830 | queue->hctx = hctx; | |
1831 | } | |
1832 | ||
1833 | static int | |
1834 | nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, | |
1835 | unsigned int hctx_idx) | |
1836 | { | |
1837 | struct nvme_fc_ctrl *ctrl = data; | |
1838 | ||
1839 | __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); | |
1840 | ||
1841 | return 0; | |
1842 | } | |
1843 | ||
1844 | static int | |
1845 | nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, | |
1846 | unsigned int hctx_idx) | |
1847 | { | |
1848 | struct nvme_fc_ctrl *ctrl = data; | |
1849 | ||
1850 | __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); | |
1851 | ||
1852 | return 0; | |
1853 | } | |
1854 | ||
1855 | static void | |
08e15075 | 1856 | nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) |
e399441d JS |
1857 | { |
1858 | struct nvme_fc_queue *queue; | |
1859 | ||
1860 | queue = &ctrl->queues[idx]; | |
1861 | memset(queue, 0, sizeof(*queue)); | |
1862 | queue->ctrl = ctrl; | |
1863 | queue->qnum = idx; | |
1864 | atomic_set(&queue->csn, 1); | |
1865 | queue->dev = ctrl->dev; | |
1866 | ||
1867 | if (idx > 0) | |
1868 | queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; | |
1869 | else | |
1870 | queue->cmnd_capsule_len = sizeof(struct nvme_command); | |
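	/*
	 * Note: ioccsz (from Identify Controller) is in 16-byte units per
	 * NVMe-oF, so e.g. an ioccsz of 4 means a 64-byte command capsule,
	 * i.e. a bare SQE with no in-capsule data.
	 */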
1871 | ||
e399441d JS |
1872 | /* |
1873 | * Considered whether we should allocate buffers for all SQEs | |
1874 | * and CQEs and dma map them - mapping their respective entries | |
1875 | * into the request structures (kernel vm addr and dma address) | |
1876 | * thus the driver could use the buffers/mappings directly. | |
1877 | * It only makes sense if the LLDD would use them for its | |
1878 | * messaging api. It's very unlikely most adapter APIs would use | |
1879 | * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload | |
1880 | * structures were used instead. | |
1881 | */ | |
1882 | } | |
1883 | ||
1884 | /* | |
1885 | * This routine terminates a queue at the transport level. | |
1886 | * The transport has already ensured that all outstanding ios on | |
1887 | * the queue have been terminated. | |
1888 | * The transport will send a Disconnect LS request to terminate | |
1889 | * the queue's connection. Termination of the admin queue will also | |
1890 | * terminate the association at the target. | |
1891 | */ | |
1892 | static void | |
1893 | nvme_fc_free_queue(struct nvme_fc_queue *queue) | |
1894 | { | |
1895 | if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) | |
1896 | return; | |
1897 | ||
9e0ed16a | 1898 | clear_bit(NVME_FC_Q_LIVE, &queue->flags); |
e399441d JS |
1899 | /* |
1900 | * Current implementation never disconnects a single queue. | |
1901 | * It always terminates a whole association. So there is never | |
1902 | * a disconnect(queue) LS sent to the target. | |
1903 | */ | |
1904 | ||
1905 | queue->connection_id = 0; | |
e399441d JS |
1906 | } |
1907 | ||
1908 | static void | |
1909 | __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, | |
1910 | struct nvme_fc_queue *queue, unsigned int qidx) | |
1911 | { | |
1912 | if (ctrl->lport->ops->delete_queue) | |
1913 | ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, | |
1914 | queue->lldd_handle); | |
1915 | queue->lldd_handle = NULL; | |
1916 | } | |
1917 | ||
e399441d JS |
1918 | static void |
1919 | nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) | |
1920 | { | |
1921 | int i; | |
1922 | ||
d858e5f0 | 1923 | for (i = 1; i < ctrl->ctrl.queue_count; i++) |
e399441d JS |
1924 | nvme_fc_free_queue(&ctrl->queues[i]); |
1925 | } | |
1926 | ||
1927 | static int | |
1928 | __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, | |
1929 | struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) | |
1930 | { | |
1931 | int ret = 0; | |
1932 | ||
1933 | queue->lldd_handle = NULL; | |
1934 | if (ctrl->lport->ops->create_queue) | |
1935 | ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, | |
1936 | qidx, qsize, &queue->lldd_handle); | |
1937 | ||
1938 | return ret; | |
1939 | } | |
1940 | ||
1941 | static void | |
1942 | nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) | |
1943 | { | |
d858e5f0 | 1944 | struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; |
e399441d JS |
1945 | int i; |
1946 | ||
d858e5f0 | 1947 | for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) |
e399441d JS |
1948 | __nvme_fc_delete_hw_queue(ctrl, queue, i); |
1949 | } | |
1950 | ||
1951 | static int | |
1952 | nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) | |
1953 | { | |
1954 | struct nvme_fc_queue *queue = &ctrl->queues[1]; | |
17a1ec08 | 1955 | int i, ret; |
e399441d | 1956 | |
d858e5f0 | 1957 | for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { |
e399441d | 1958 | ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); |
17a1ec08 JT |
1959 | if (ret) |
1960 | goto delete_queues; | |
e399441d JS |
1961 | } |
1962 | ||
1963 | return 0; | |
17a1ec08 JT |
1964 | |
1965 | delete_queues: | |
1966 | for (; i >= 0; i--) | |
1967 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); | |
1968 | return ret; | |
e399441d JS |
1969 | } |
1970 | ||
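/*
 * Reader's note on the bring-up below: each io queue is connected in two
 * steps - a transport-level connect to the target (nvme_fc_connect_queue(),
 * which issues the FC-NVME Create Connection LS), followed by the NVMe-oF
 * Connect command sent on the new queue (nvmf_connect_io_queue()). Only
 * when both succeed is the queue flagged NVME_FC_Q_LIVE and allowed to
 * carry regular io.
 */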
1971 | static int | |
1972 | nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) | |
1973 | { | |
1974 | int i, ret = 0; | |
1975 | ||
d858e5f0 | 1976 | for (i = 1; i < ctrl->ctrl.queue_count; i++) { |
e399441d JS |
1977 | ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, |
1978 | (qsize / 5)); | |
1979 | if (ret) | |
1980 | break; | |
1981 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); | |
1982 | if (ret) | |
1983 | break; | |
9e0ed16a SG |
1984 | |
1985 | set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); | |
e399441d JS |
1986 | } |
1987 | ||
1988 | return ret; | |
1989 | } | |
1990 | ||
1991 | static void | |
1992 | nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) | |
1993 | { | |
1994 | int i; | |
1995 | ||
d858e5f0 | 1996 | for (i = 1; i < ctrl->ctrl.queue_count; i++) |
08e15075 | 1997 | nvme_fc_init_queue(ctrl, i); |
e399441d JS |
1998 | } |
1999 | ||
2000 | static void | |
2001 | nvme_fc_ctrl_free(struct kref *ref) | |
2002 | { | |
2003 | struct nvme_fc_ctrl *ctrl = | |
2004 | container_of(ref, struct nvme_fc_ctrl, ref); | |
2005 | unsigned long flags; | |
2006 | ||
61bff8ef JS |
2007 | if (ctrl->ctrl.tagset) { |
2008 | blk_cleanup_queue(ctrl->ctrl.connect_q); | |
2009 | blk_mq_free_tag_set(&ctrl->tag_set); | |
e399441d JS |
2010 | } |
2011 | ||
61bff8ef JS |
2012 | /* remove from rport list */ |
2013 | spin_lock_irqsave(&ctrl->rport->lock, flags); | |
2014 | list_del(&ctrl->ctrl_list); | |
2015 | spin_unlock_irqrestore(&ctrl->rport->lock, flags); | |
2016 | ||
f9c5af5f | 2017 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); |
61bff8ef JS |
2018 | blk_cleanup_queue(ctrl->ctrl.admin_q); |
2019 | blk_mq_free_tag_set(&ctrl->admin_tag_set); | |
2020 | ||
2021 | kfree(ctrl->queues); | |
2022 | ||
e399441d JS |
2023 | put_device(ctrl->dev); |
2024 | nvme_fc_rport_put(ctrl->rport); | |
2025 | ||
e399441d | 2026 | ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); |
de41447a EM |
2027 | if (ctrl->ctrl.opts) |
2028 | nvmf_free_options(ctrl->ctrl.opts); | |
e399441d JS |
2029 | kfree(ctrl); |
2030 | } | |
2031 | ||
2032 | static void | |
2033 | nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) | |
2034 | { | |
2035 | kref_put(&ctrl->ref, nvme_fc_ctrl_free); | |
2036 | } | |
2037 | ||
2038 | static int | |
2039 | nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) | |
2040 | { | |
2041 | return kref_get_unless_zero(&ctrl->ref); | |
2042 | } | |
2043 | ||
2044 | /* | |
2045 | * All accesses from nvme core layer done - can now free the | |
2046 | * controller. Called after last nvme_put_ctrl() call | |
2047 | */ | |
2048 | static void | |
61bff8ef | 2049 | nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) |
e399441d JS |
2050 | { |
2051 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | |
2052 | ||
2053 | WARN_ON(nctrl != &ctrl->ctrl); | |
2054 | ||
61bff8ef JS |
2055 | nvme_fc_ctrl_put(ctrl); |
2056 | } | |
e399441d | 2057 | |
61bff8ef JS |
2058 | static void |
2059 | nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) | |
2060 | { | |
69fa9646 JS |
2061 | /* only proceed if in LIVE state - e.g. on first error */ |
2062 | if (ctrl->ctrl.state != NVME_CTRL_LIVE) | |
2063 | return; | |
2064 | ||
61bff8ef JS |
2065 | dev_warn(ctrl->ctrl.device, |
2066 | "NVME-FC{%d}: transport association error detected: %s\n", | |
2067 | ctrl->cnum, errmsg); | |
589ff775 | 2068 | dev_warn(ctrl->ctrl.device, |
61bff8ef | 2069 | "NVME-FC{%d}: resetting controller\n", ctrl->cnum); |
e399441d | 2070 | |
d86c4d8e | 2071 | nvme_reset_ctrl(&ctrl->ctrl); |
e399441d JS |
2072 | } |
2073 | ||
baee29ac | 2074 | static enum blk_eh_timer_return |
e399441d JS |
2075 | nvme_fc_timeout(struct request *rq, bool reserved) |
2076 | { | |
2077 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
2078 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
2079 | int ret; | |
2080 | ||
134aedc9 JS |
2081 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || |
2082 | atomic_read(&op->state) == FCPOP_STATE_ABORTED) | |
e399441d JS |
2083 | return BLK_EH_RESET_TIMER; |
2084 | ||
2085 | ret = __nvme_fc_abort_op(ctrl, op); | |
2086 | if (ret) | |
134aedc9 JS |
2087 | /* io wasn't active to abort */ |
2088 | return BLK_EH_NOT_HANDLED; | |
e399441d JS |
2089 | |
2090 | /* | |
61bff8ef JS |
2091 | * we can't individually ABTS an io without affecting the queue, |
2092 | * thus killing the queue, and thus the association. | |
2093 | * So resolve by performing a controller reset, which will stop | |
2094 | * the host/io stack, terminate the association on the link, | |
2095 | * and recreate an association on the link. | |
e399441d | 2096 | */ |
61bff8ef | 2097 | nvme_fc_error_recovery(ctrl, "io timeout error"); |
e399441d | 2098 | |
134aedc9 JS |
2099 | /* |
2100 | * the io abort has been initiated. Restart the request timer; | |
2101 | * the abort completion will complete the io shortly, avoiding | |
2102 | * a synchronous wait while the abort finishes. | |
2103 | */ | |
2104 | return BLK_EH_RESET_TIMER; | |
e399441d JS |
2105 | } |
2106 | ||
2107 | static int | |
2108 | nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, | |
2109 | struct nvme_fc_fcp_op *op) | |
2110 | { | |
2111 | struct nvmefc_fcp_req *freq = &op->fcp_req; | |
e399441d JS |
2112 | enum dma_data_direction dir; |
2113 | int ret; | |
2114 | ||
2115 | freq->sg_cnt = 0; | |
2116 | ||
b131c61d | 2117 | if (!blk_rq_payload_bytes(rq)) |
e399441d JS |
2118 | return 0; |
2119 | ||
2120 | freq->sg_table.sgl = freq->first_sgl; | |
19e420bb CH |
2121 | ret = sg_alloc_table_chained(&freq->sg_table, |
2122 | blk_rq_nr_phys_segments(rq), freq->sg_table.sgl); | |
e399441d JS |
2123 | if (ret) |
2124 | return -ENOMEM; | |
2125 | ||
2126 | op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); | |
19e420bb | 2127 | WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); |
e399441d JS |
2128 | dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; |
2129 | freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, | |
2130 | op->nents, dir); | |
2131 | if (unlikely(freq->sg_cnt <= 0)) { | |
2132 | sg_free_table_chained(&freq->sg_table, true); | |
2133 | freq->sg_cnt = 0; | |
2134 | return -EFAULT; | |
2135 | } | |
2136 | ||
2137 | /* | |
2138 | * TODO: blk_integrity_rq(rq) for DIF | |
2139 | */ | |
2140 | return 0; | |
2141 | } | |
2142 | ||
2143 | static void | |
2144 | nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, | |
2145 | struct nvme_fc_fcp_op *op) | |
2146 | { | |
2147 | struct nvmefc_fcp_req *freq = &op->fcp_req; | |
2148 | ||
2149 | if (!freq->sg_cnt) | |
2150 | return; | |
2151 | ||
2152 | fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, | |
2153 | ((rq_data_dir(rq) == WRITE) ? | |
2154 | DMA_TO_DEVICE : DMA_FROM_DEVICE)); | |
2155 | ||
2156 | nvme_cleanup_cmd(rq); | |
2157 | ||
2158 | sg_free_table_chained(&freq->sg_table, true); | |
2159 | ||
2160 | freq->sg_cnt = 0; | |
2161 | } | |
2162 | ||
2163 | /* | |
2164 | * In FC, the queue is a logical thing. At transport connect, the target | |
2165 | * creates its "queue" and returns a handle that is to be given to the | |
2166 | * target whenever the host posts something to the corresponding SQ. When an | |
2167 | * SQE is sent on a SQ, FC effectively considers the SQE, or rather the | |
2168 | * command contained within the SQE, an io, and assigns a FC exchange | |
2169 | * to it. The SQE and the associated SQ handle are sent in the initial | |
2170 | * CMD IU sent on the exchange. All transfers relative to the io occur | |
2171 | * as part of the exchange. The CQE is the last thing for the io, | |
2172 | * which is transferred (explicitly or implicitly) with the RSP IU | |
2173 | * sent on the exchange. After the CQE is received, the FC exchange is | |
2174 | * terminated and the exchange may be used on a different io. | |
2175 | * | |
2176 | * The transport to LLDD api has the transport making a request for a | |
2177 | * new fcp io request to the LLDD. The LLDD then allocates a FC exchange | |
2178 | * resource and transfers the command. The LLDD will then process all | |
2179 | * steps to complete the io. Upon completion, the transport done routine | |
2180 | * is called. | |
2181 | * | |
2182 | * So - while the operation is outstanding to the LLDD, there is a link | |
2183 | * level FC exchange resource that is also outstanding. This must be | |
2184 | * considered in all cleanup operations. | |
2185 | */ | |
fc17b653 | 2186 | static blk_status_t |
e399441d JS |
2187 | nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, |
2188 | struct nvme_fc_fcp_op *op, u32 data_len, | |
2189 | enum nvmefc_fcp_datadir io_dir) | |
2190 | { | |
2191 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; | |
2192 | struct nvme_command *sqe = &cmdiu->sqe; | |
2193 | u32 csn; | |
2194 | int ret; | |
2195 | ||
61bff8ef JS |
2196 | /* |
2197 | * before attempting to send the io, check to see if we believe | |
2198 | * the target device is present | |
2199 | */ | |
2200 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) | |
86ff7c2a | 2201 | return BLK_STS_RESOURCE; |
61bff8ef | 2202 | |
e399441d | 2203 | if (!nvme_fc_ctrl_get(ctrl)) |
fc17b653 | 2204 | return BLK_STS_IOERR; |
e399441d JS |
2205 | |
2206 | /* format the FC-NVME CMD IU and fcp_req */ | |
2207 | cmdiu->connection_id = cpu_to_be64(queue->connection_id); | |
2208 | csn = atomic_inc_return(&queue->csn); | |
2209 | cmdiu->csn = cpu_to_be32(csn); | |
2210 | cmdiu->data_len = cpu_to_be32(data_len); | |
2211 | switch (io_dir) { | |
2212 | case NVMEFC_FCP_WRITE: | |
2213 | cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; | |
2214 | break; | |
2215 | case NVMEFC_FCP_READ: | |
2216 | cmdiu->flags = FCNVME_CMD_FLAGS_READ; | |
2217 | break; | |
2218 | case NVMEFC_FCP_NODATA: | |
2219 | cmdiu->flags = 0; | |
2220 | break; | |
2221 | } | |
2222 | op->fcp_req.payload_length = data_len; | |
2223 | op->fcp_req.io_dir = io_dir; | |
2224 | op->fcp_req.transferred_length = 0; | |
2225 | op->fcp_req.rcv_rsplen = 0; | |
62eeacb0 | 2226 | op->fcp_req.status = NVME_SC_SUCCESS; |
e399441d JS |
2227 | op->fcp_req.sqid = cpu_to_le16(queue->qnum); |
2228 | ||
2229 | /* | |
2230 | * validate per fabric rules, set fields mandated by fabric spec | |
2231 | * as well as those by FC-NVME spec. | |
2232 | */ | |
2233 | WARN_ON_ONCE(sqe->common.metadata); | |
e399441d JS |
2234 | sqe->common.flags |= NVME_CMD_SGL_METABUF; |
2235 | ||
2236 | /* | |
d9d34c0b JS |
2237 | * format SQE DPTR field per FC-NVME rules: |
2238 | * type=0x5 Transport SGL Data Block Descriptor | |
2239 | * subtype=0xA Transport-specific value | |
2240 | * address=0 | |
2241 | * length=length of the data series | |
e399441d | 2242 | */ |
d9d34c0b JS |
2243 | sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | |
2244 | NVME_SGL_FMT_TRANSPORT_A; | |
e399441d JS |
2245 | sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); |
2246 | sqe->rw.dptr.sgl.addr = 0; | |
2247 | ||
78a7ac26 | 2248 | if (!(op->flags & FCOP_FLAGS_AEN)) { |
e399441d JS |
2249 | ret = nvme_fc_map_data(ctrl, op->rq, op); |
2250 | if (ret < 0) { | |
e399441d JS |
2251 | nvme_cleanup_cmd(op->rq); |
2252 | nvme_fc_ctrl_put(ctrl); | |
fc17b653 CH |
2253 | if (ret == -ENOMEM || ret == -EAGAIN) |
2254 | return BLK_STS_RESOURCE; | |
2255 | return BLK_STS_IOERR; | |
e399441d JS |
2256 | } |
2257 | } | |
2258 | ||
2259 | fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, | |
2260 | sizeof(op->cmd_iu), DMA_TO_DEVICE); | |
2261 | ||
2262 | atomic_set(&op->state, FCPOP_STATE_ACTIVE); | |
2263 | ||
78a7ac26 | 2264 | if (!(op->flags & FCOP_FLAGS_AEN)) |
e399441d JS |
2265 | blk_mq_start_request(op->rq); |
2266 | ||
2267 | ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, | |
2268 | &ctrl->rport->remoteport, | |
2269 | queue->lldd_handle, &op->fcp_req); | |
2270 | ||
2271 | if (ret) { | |
8b25f351 | 2272 | if (!(op->flags & FCOP_FLAGS_AEN)) |
e399441d | 2273 | nvme_fc_unmap_data(ctrl, op->rq, op); |
e399441d JS |
2274 | |
2275 | nvme_fc_ctrl_put(ctrl); | |
2276 | ||
8b25f351 JS |
2277 | if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && |
2278 | ret != -EBUSY) | |
fc17b653 | 2279 | return BLK_STS_IOERR; |
e399441d | 2280 | |
86ff7c2a | 2281 | return BLK_STS_RESOURCE; |
e399441d JS |
2282 | } |
2283 | ||
fc17b653 | 2284 | return BLK_STS_OK; |
e399441d JS |
2285 | } |
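/*
 * Purely illustrative sketch (not any real LLDD) of the other half of the
 * fcp_io() contract used above: the LLDD ships the CMD IU on a fresh FC
 * exchange and, once the exchange completes, fills in the result fields of
 * the nvmefc_fcp_req and invokes its ->done callback, which lands in
 * nvme_fc_fcpio_done(). Function and variable names here are made up.
 *
 *	static int example_lldd_fcp_io(struct nvme_fc_local_port *lport,
 *			struct nvme_fc_remote_port *rport,
 *			void *hw_queue_handle, struct nvmefc_fcp_req *req)
 *	{
 *		// allocate an exchange, DMA the CMD IU at req->cmdaddr ...
 *		return 0;		// io is now outstanding in the LLDD
 *	}
 *
 *	static void example_lldd_fcp_done(struct nvmefc_fcp_req *req,
 *			u32 xfrd_len, u16 rsp_len, u32 xport_err)
 *	{
 *		req->transferred_length = xfrd_len;
 *		req->rcv_rsplen = rsp_len;	// 0, 12 bytes, or a full ERSP
 *		req->status = xport_err;	// 0 on transport success
 *		req->done(req);			// -> nvme_fc_fcpio_done()
 *	}
 */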
2286 | ||
9e0ed16a SG |
2287 | static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue, |
2288 | struct request *rq) | |
2289 | { | |
2290 | if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags))) | |
2291 | return nvmf_check_init_req(&queue->ctrl->ctrl, rq); | |
2292 | return BLK_STS_OK; | |
2293 | } | |
2294 | ||
fc17b653 | 2295 | static blk_status_t |
e399441d JS |
2296 | nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, |
2297 | const struct blk_mq_queue_data *bd) | |
2298 | { | |
2299 | struct nvme_ns *ns = hctx->queue->queuedata; | |
2300 | struct nvme_fc_queue *queue = hctx->driver_data; | |
2301 | struct nvme_fc_ctrl *ctrl = queue->ctrl; | |
2302 | struct request *rq = bd->rq; | |
2303 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
2304 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; | |
2305 | struct nvme_command *sqe = &cmdiu->sqe; | |
2306 | enum nvmefc_fcp_datadir io_dir; | |
2307 | u32 data_len; | |
fc17b653 | 2308 | blk_status_t ret; |
e399441d | 2309 | |
9e0ed16a SG |
2310 | ret = nvme_fc_is_ready(queue, rq); |
2311 | if (unlikely(ret)) | |
2312 | return ret; | |
2313 | ||
e399441d JS |
2314 | ret = nvme_setup_cmd(ns, rq, sqe); |
2315 | if (ret) | |
2316 | return ret; | |
2317 | ||
b131c61d | 2318 | data_len = blk_rq_payload_bytes(rq); |
e399441d JS |
2319 | if (data_len) |
2320 | io_dir = ((rq_data_dir(rq) == WRITE) ? | |
2321 | NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); | |
2322 | else | |
2323 | io_dir = NVMEFC_FCP_NODATA; | |
2324 | ||
2325 | return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); | |
2326 | } | |
2327 | ||
2328 | static struct blk_mq_tags * | |
2329 | nvme_fc_tagset(struct nvme_fc_queue *queue) | |
2330 | { | |
2331 | if (queue->qnum == 0) | |
2332 | return queue->ctrl->admin_tag_set.tags[queue->qnum]; | |
2333 | ||
2334 | return queue->ctrl->tag_set.tags[queue->qnum - 1]; | |
2335 | } | |
2336 | ||
2337 | static int | |
2338 | nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) | |
2340 | { | |
2341 | struct nvme_fc_queue *queue = hctx->driver_data; | |
2342 | struct nvme_fc_ctrl *ctrl = queue->ctrl; | |
2343 | struct request *req; | |
2344 | struct nvme_fc_fcp_op *op; | |
2345 | ||
2346 | req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag); | |
61bff8ef | 2347 | if (!req) |
e399441d | 2348 | return 0; |
e399441d JS |
2349 | |
2350 | op = blk_mq_rq_to_pdu(req); | |
2351 | ||
2352 | if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) && | |
2353 | (ctrl->lport->ops->poll_queue)) | |
2354 | ctrl->lport->ops->poll_queue(&ctrl->lport->localport, | |
2355 | queue->lldd_handle); | |
2356 | ||
2357 | return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE)); | |
2358 | } | |
2359 | ||
2360 | static void | |
ad22c355 | 2361 | nvme_fc_submit_async_event(struct nvme_ctrl *arg) |
e399441d JS |
2362 | { |
2363 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); | |
2364 | struct nvme_fc_fcp_op *aen_op; | |
61bff8ef JS |
2365 | unsigned long flags; |
2366 | bool terminating = false; | |
fc17b653 | 2367 | blk_status_t ret; |
e399441d | 2368 | |
61bff8ef JS |
2369 | spin_lock_irqsave(&ctrl->lock, flags); |
2370 | if (ctrl->flags & FCCTRL_TERMIO) | |
2371 | terminating = true; | |
2372 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
2373 | ||
2374 | if (terminating) | |
2375 | return; | |
2376 | ||
ad22c355 | 2377 | aen_op = &ctrl->aen_ops[0]; |
e399441d JS |
2378 | |
2379 | ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, | |
2380 | NVMEFC_FCP_NODATA); | |
2381 | if (ret) | |
2382 | dev_err(ctrl->ctrl.device, | |
ad22c355 | 2383 | "failed async event work\n"); |
e399441d JS |
2384 | } |
2385 | ||
2386 | static void | |
c3aedd22 | 2387 | nvme_fc_complete_rq(struct request *rq) |
e399441d JS |
2388 | { |
2389 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
2390 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
e399441d | 2391 | |
78a7ac26 | 2392 | atomic_set(&op->state, FCPOP_STATE_IDLE); |
e399441d | 2393 | |
e399441d | 2394 | nvme_fc_unmap_data(ctrl, rq, op); |
77f02a7a | 2395 | nvme_complete_rq(rq); |
e399441d | 2396 | nvme_fc_ctrl_put(ctrl); |
78a7ac26 JS |
2397 | } |
2398 | ||
e399441d JS |
2399 | /* |
2400 | * This routine is used by the transport when it needs to find active | |
2401 | * io on a queue that is to be terminated. The transport uses | |
2402 | * blk_mq_tagset_busy_iter() to find the busy requests, which then invokes | |
2403 | * this routine to kill them one by one. | |
2404 | * | |
2405 | * As FC allocates FC exchange for each io, the transport must contact | |
2406 | * the LLDD to terminate the exchange, thus releasing the FC exchange. | |
2407 | * After terminating the exchange the LLDD will call the transport's | |
2408 | * normal io done path for the request, but it will have an aborted | |
2409 | * status. The done path will return the io request back to the block | |
2410 | * layer with an error status. | |
2411 | */ | |
2412 | static void | |
2413 | nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) | |
2414 | { | |
2415 | struct nvme_ctrl *nctrl = data; | |
2416 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | |
2417 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); | |
e399441d JS |
2418 | |
2419 | if (!blk_mq_request_started(req)) | |
2420 | return; | |
2421 | ||
3efd6e8e | 2422 | __nvme_fc_abort_op(ctrl, op); |
e399441d JS |
2423 | } |
2424 | ||
78a7ac26 | 2425 | |
61bff8ef JS |
2426 | static const struct blk_mq_ops nvme_fc_mq_ops = { |
2427 | .queue_rq = nvme_fc_queue_rq, | |
2428 | .complete = nvme_fc_complete_rq, | |
2429 | .init_request = nvme_fc_init_request, | |
2430 | .exit_request = nvme_fc_exit_request, | |
61bff8ef JS |
2431 | .init_hctx = nvme_fc_init_hctx, |
2432 | .poll = nvme_fc_poll, | |
2433 | .timeout = nvme_fc_timeout, | |
2434 | }; | |
e399441d | 2435 | |
61bff8ef JS |
2436 | static int |
2437 | nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) | |
e399441d | 2438 | { |
61bff8ef | 2439 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; |
7314183d | 2440 | unsigned int nr_io_queues; |
61bff8ef | 2441 | int ret; |
e399441d | 2442 | |
7314183d SG |
2443 | nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), |
2444 | ctrl->lport->ops->max_hw_queues); | |
2445 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); | |
61bff8ef JS |
2446 | if (ret) { |
2447 | dev_info(ctrl->ctrl.device, | |
2448 | "set_queue_count failed: %d\n", ret); | |
2449 | return ret; | |
2450 | } | |
e399441d | 2451 | |
7314183d SG |
2452 | ctrl->ctrl.queue_count = nr_io_queues + 1; |
2453 | if (!nr_io_queues) | |
61bff8ef | 2454 | return 0; |
e399441d | 2455 | |
61bff8ef | 2456 | nvme_fc_init_io_queues(ctrl); |
e399441d | 2457 | |
61bff8ef JS |
2458 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); |
2459 | ctrl->tag_set.ops = &nvme_fc_mq_ops; | |
2460 | ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; | |
2461 | ctrl->tag_set.reserved_tags = 1; /* fabric connect */ | |
2462 | ctrl->tag_set.numa_node = NUMA_NO_NODE; | |
2463 | ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | |
2464 | ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) + | |
2465 | (SG_CHUNK_SIZE * | |
2466 | sizeof(struct scatterlist)) + | |
2467 | ctrl->lport->ops->fcprqst_priv_sz; | |
2468 | ctrl->tag_set.driver_data = ctrl; | |
d858e5f0 | 2469 | ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; |
61bff8ef | 2470 | ctrl->tag_set.timeout = NVME_IO_TIMEOUT; |
e399441d | 2471 | |
61bff8ef JS |
2472 | ret = blk_mq_alloc_tag_set(&ctrl->tag_set); |
2473 | if (ret) | |
2474 | return ret; | |
e399441d | 2475 | |
61bff8ef | 2476 | ctrl->ctrl.tagset = &ctrl->tag_set; |
e399441d | 2477 | |
61bff8ef JS |
2478 | ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); |
2479 | if (IS_ERR(ctrl->ctrl.connect_q)) { | |
2480 | ret = PTR_ERR(ctrl->ctrl.connect_q); | |
2481 | goto out_free_tag_set; | |
2482 | } | |
e399441d | 2483 | |
d157e534 | 2484 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
e399441d | 2485 | if (ret) |
61bff8ef | 2486 | goto out_cleanup_blk_queue; |
e399441d | 2487 | |
d157e534 | 2488 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
61bff8ef JS |
2489 | if (ret) |
2490 | goto out_delete_hw_queues; | |
e399441d JS |
2491 | |
2492 | return 0; | |
e399441d | 2493 | |
61bff8ef JS |
2494 | out_delete_hw_queues: |
2495 | nvme_fc_delete_hw_io_queues(ctrl); | |
2496 | out_cleanup_blk_queue: | |
61bff8ef JS |
2497 | blk_cleanup_queue(ctrl->ctrl.connect_q); |
2498 | out_free_tag_set: | |
2499 | blk_mq_free_tag_set(&ctrl->tag_set); | |
2500 | nvme_fc_free_io_queues(ctrl); | |
e399441d | 2501 | |
61bff8ef JS |
2502 | /* force put free routine to ignore io queues */ |
2503 | ctrl->ctrl.tagset = NULL; | |
2504 | ||
2505 | return ret; | |
2506 | } | |
e399441d JS |
2507 | |
2508 | static int | |
61bff8ef | 2509 | nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) |
e399441d JS |
2510 | { |
2511 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | |
7314183d | 2512 | unsigned int nr_io_queues; |
e399441d JS |
2513 | int ret; |
2514 | ||
7314183d SG |
2515 | nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), |
2516 | ctrl->lport->ops->max_hw_queues); | |
2517 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); | |
e399441d JS |
2518 | if (ret) { |
2519 | dev_info(ctrl->ctrl.device, | |
2520 | "set_queue_count failed: %d\n", ret); | |
2521 | return ret; | |
2522 | } | |
2523 | ||
7314183d | 2524 | ctrl->ctrl.queue_count = nr_io_queues + 1; |
61bff8ef | 2525 | /* check for io queues existing */ |
d858e5f0 | 2526 | if (ctrl->ctrl.queue_count == 1) |
e399441d JS |
2527 | return 0; |
2528 | ||
e399441d JS |
2529 | nvme_fc_init_io_queues(ctrl); |
2530 | ||
31b84460 | 2531 | ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); |
e399441d | 2532 | if (ret) |
61bff8ef | 2533 | goto out_free_io_queues; |
e399441d | 2534 | |
d157e534 | 2535 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
e399441d | 2536 | if (ret) |
61bff8ef | 2537 | goto out_free_io_queues; |
e399441d | 2538 | |
d157e534 | 2539 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
e399441d JS |
2540 | if (ret) |
2541 | goto out_delete_hw_queues; | |
2542 | ||
cda5fd1a SG |
2543 | blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); |
2544 | ||
e399441d JS |
2545 | return 0; |
2546 | ||
2547 | out_delete_hw_queues: | |
2548 | nvme_fc_delete_hw_io_queues(ctrl); | |
61bff8ef | 2549 | out_free_io_queues: |
e399441d | 2550 | nvme_fc_free_io_queues(ctrl); |
61bff8ef JS |
2551 | return ret; |
2552 | } | |
e399441d | 2553 | |
158bfb88 JS |
2554 | static void |
2555 | nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) | |
2556 | { | |
2557 | struct nvme_fc_lport *lport = rport->lport; | |
2558 | ||
2559 | atomic_inc(&lport->act_rport_cnt); | |
2560 | } | |
2561 | ||
2562 | static void | |
2563 | nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) | |
2564 | { | |
2565 | struct nvme_fc_lport *lport = rport->lport; | |
2566 | u32 cnt; | |
2567 | ||
2568 | cnt = atomic_dec_return(&lport->act_rport_cnt); | |
2569 | if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) | |
2570 | lport->ops->localport_delete(&lport->localport); | |
2571 | } | |
2572 | ||
2573 | static int | |
2574 | nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) | |
2575 | { | |
2576 | struct nvme_fc_rport *rport = ctrl->rport; | |
2577 | u32 cnt; | |
2578 | ||
2579 | if (ctrl->assoc_active) | |
2580 | return 1; | |
2581 | ||
2582 | ctrl->assoc_active = true; | |
2583 | cnt = atomic_inc_return(&rport->act_ctrl_cnt); | |
2584 | if (cnt == 1) | |
2585 | nvme_fc_rport_active_on_lport(rport); | |
2586 | ||
2587 | return 0; | |
2588 | } | |
2589 | ||
2590 | static int | |
2591 | nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) | |
2592 | { | |
2593 | struct nvme_fc_rport *rport = ctrl->rport; | |
2594 | struct nvme_fc_lport *lport = rport->lport; | |
2595 | u32 cnt; | |
2596 | ||
2597 | /* ctrl->assoc_active=false will be set independently */ | |
2598 | ||
2599 | cnt = atomic_dec_return(&rport->act_ctrl_cnt); | |
2600 | if (cnt == 0) { | |
2601 | if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) | |
2602 | lport->ops->remoteport_delete(&rport->remoteport); | |
2603 | nvme_fc_rport_inactive_on_lport(rport); | |
2604 | } | |
2605 | ||
2606 | return 0; | |
2607 | } | |
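/*
 * Reader's note on the four helpers above: act_ctrl_cnt counts live
 * associations per remoteport and act_rport_cnt counts remoteports with
 * live associations per localport. The LLDD's remoteport_delete() /
 * localport_delete() callbacks fire only when the relevant count drops to
 * zero *and* that port has already been moved to FC_OBJSTATE_DELETED.
 */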
2608 | ||
61bff8ef JS |
2609 | /* |
2610 | * This routine restarts the controller on the host side, and | |
2611 | * on the link side, recreates the controller association. | |
2612 | */ | |
2613 | static int | |
2614 | nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |
2615 | { | |
2616 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | |
61bff8ef JS |
2617 | int ret; |
2618 | bool changed; | |
2619 | ||
fdf9dfa8 | 2620 | ++ctrl->ctrl.nr_reconnects; |
61bff8ef | 2621 | |
96e24801 JS |
2622 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) |
2623 | return -ENODEV; | |
2624 | ||
158bfb88 JS |
2625 | if (nvme_fc_ctlr_active_on_rport(ctrl)) |
2626 | return -ENOTUNIQ; | |
2627 | ||
61bff8ef JS |
2628 | /* |
2629 | * Create the admin queue | |
2630 | */ | |
2631 | ||
08e15075 | 2632 | nvme_fc_init_queue(ctrl, 0); |
61bff8ef JS |
2633 | |
2634 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, | |
d157e534 | 2635 | NVME_AQ_DEPTH); |
61bff8ef JS |
2636 | if (ret) |
2637 | goto out_free_queue; | |
2638 | ||
2639 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], | |
d157e534 | 2640 | NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); |
61bff8ef JS |
2641 | if (ret) |
2642 | goto out_delete_hw_queue; | |
2643 | ||
2644 | if (ctrl->ctrl.state != NVME_CTRL_NEW) | |
f9c5af5f | 2645 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); |
61bff8ef JS |
2646 | |
2647 | ret = nvmf_connect_admin_queue(&ctrl->ctrl); | |
2648 | if (ret) | |
2649 | goto out_disconnect_admin_queue; | |
2650 | ||
9e0ed16a SG |
2651 | set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); |
2652 | ||
61bff8ef JS |
2653 | /* |
2654 | * Check controller capabilities | |
2655 | * | |
2656 | * TODO: add code to check if ctrl attributes changed from | |
2657 | * prior connection values | |
2658 | */ | |
2659 | ||
20d0dfe6 | 2660 | ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap); |
61bff8ef JS |
2661 | if (ret) { |
2662 | dev_err(ctrl->ctrl.device, | |
2663 | "prop_get NVME_REG_CAP failed\n"); | |
2664 | goto out_disconnect_admin_queue; | |
2665 | } | |
2666 | ||
2667 | ctrl->ctrl.sqsize = | |
d157e534 | 2668 | min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); |
61bff8ef | 2669 | |
20d0dfe6 | 2670 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); |
61bff8ef JS |
2671 | if (ret) |
2672 | goto out_disconnect_admin_queue; | |
2673 | ||
ecad0d2c JS |
2674 | ctrl->ctrl.max_hw_sectors = |
2675 | (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9); | |
61bff8ef JS |
2676 | |
2677 | ret = nvme_init_identify(&ctrl->ctrl); | |
2678 | if (ret) | |
2679 | goto out_disconnect_admin_queue; | |
2680 | ||
2681 | /* sanity checks */ | |
2682 | ||
2683 | /* FC-NVME does not have other data in the capsule */ | |
2684 | if (ctrl->ctrl.icdoff) { | |
2685 | dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", | |
2686 | ctrl->ctrl.icdoff); | |
2687 | goto out_disconnect_admin_queue; | |
2688 | } | |
2689 | ||
61bff8ef JS |
2690 | /* FC-NVME supports normal SGL Data Block Descriptors */ |
2691 | ||
2692 | if (opts->queue_size > ctrl->ctrl.maxcmd) { | |
2693 | /* warn if maxcmd is lower than queue_size */ | |
2694 | dev_warn(ctrl->ctrl.device, | |
2695 | "queue_size %zu > ctrl maxcmd %u, reducing " | |
2696 | "to queue_size\n", | |
2697 | opts->queue_size, ctrl->ctrl.maxcmd); | |
2698 | opts->queue_size = ctrl->ctrl.maxcmd; | |
2699 | } | |
2700 | ||
d157e534 JS |
2701 | if (opts->queue_size > ctrl->ctrl.sqsize + 1) { |
2702 | /* warn if sqsize is lower than queue_size */ | |
2703 | dev_warn(ctrl->ctrl.device, | |
2704 | "queue_size %zu > ctrl sqsize %u, clamping down\n", | |
2705 | opts->queue_size, ctrl->ctrl.sqsize + 1); | |
2706 | opts->queue_size = ctrl->ctrl.sqsize + 1; | |
2707 | } | |
2708 | ||
61bff8ef JS |
2709 | ret = nvme_fc_init_aen_ops(ctrl); |
2710 | if (ret) | |
2711 | goto out_term_aen_ops; | |
2712 | ||
2713 | /* | |
2714 | * Create the io queues | |
2715 | */ | |
2716 | ||
d858e5f0 | 2717 | if (ctrl->ctrl.queue_count > 1) { |
61bff8ef JS |
2718 | if (ctrl->ctrl.state == NVME_CTRL_NEW) |
2719 | ret = nvme_fc_create_io_queues(ctrl); | |
2720 | else | |
2721 | ret = nvme_fc_reinit_io_queues(ctrl); | |
2722 | if (ret) | |
2723 | goto out_term_aen_ops; | |
2724 | } | |
2725 | ||
2726 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | |
61bff8ef | 2727 | |
fdf9dfa8 | 2728 | ctrl->ctrl.nr_reconnects = 0; |
61bff8ef | 2729 | |
44c6ec77 JS |
2730 | if (changed) |
2731 | nvme_start_ctrl(&ctrl->ctrl); | |
61bff8ef JS |
2732 | |
2733 | return 0; /* Success */ | |
2734 | ||
2735 | out_term_aen_ops: | |
2736 | nvme_fc_term_aen_ops(ctrl); | |
61bff8ef JS |
2737 | out_disconnect_admin_queue: |
2738 | /* send a Disconnect(association) LS to fc-nvme target */ | |
2739 | nvme_fc_xmt_disconnect_assoc(ctrl); | |
2740 | out_delete_hw_queue: | |
2741 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); | |
2742 | out_free_queue: | |
2743 | nvme_fc_free_queue(&ctrl->queues[0]); | |
158bfb88 JS |
2744 | ctrl->assoc_active = false; |
2745 | nvme_fc_ctlr_inactive_on_rport(ctrl); | |
e399441d JS |
2746 | |
2747 | return ret; | |
2748 | } | |
2749 | ||
61bff8ef JS |
2750 | /* |
2751 | * This routine stops operation of the controller on the host side. | |
2752 | * On the host os stack side: Admin and IO queues are stopped, | |
2753 | * outstanding ios on them terminated via FC ABTS. | |
2754 | * On the link side: the association is terminated. | |
2755 | */ | |
2756 | static void | |
2757 | nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) | |
2758 | { | |
2759 | unsigned long flags; | |
2760 | ||
158bfb88 JS |
2761 | if (!ctrl->assoc_active) |
2762 | return; | |
2763 | ctrl->assoc_active = false; | |
2764 | ||
61bff8ef JS |
2765 | spin_lock_irqsave(&ctrl->lock, flags); |
2766 | ctrl->flags |= FCCTRL_TERMIO; | |
2767 | ctrl->iocnt = 0; | |
2768 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
2769 | ||
2770 | /* | |
2771 | * If io queues are present, stop them and terminate all outstanding | |
2772 | * ios on them. As FC allocates FC exchange for each io, the | |
2773 | * transport must contact the LLDD to terminate the exchange, | |
2774 | * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter() | |
2775 | * to tell us what io's are busy and invoke a transport routine | |
2776 | * to kill them with the LLDD. After terminating the exchange | |
2777 | * the LLDD will call the transport's normal io done path, but it | |
2778 | * will have an aborted status. The done path will return the | |
2779 | * io requests back to the block layer as part of normal completions | |
2780 | * (but with error status). | |
2781 | */ | |
d858e5f0 | 2782 | if (ctrl->ctrl.queue_count > 1) { |
61bff8ef JS |
2783 | nvme_stop_queues(&ctrl->ctrl); |
2784 | blk_mq_tagset_busy_iter(&ctrl->tag_set, | |
2785 | nvme_fc_terminate_exchange, &ctrl->ctrl); | |
2786 | } | |
2787 | ||
2788 | /* | |
2789 | * Other transports, which don't have link-level contexts bound | |
2790 | * to sqe's, would try to gracefully shut down the controller by | |
2791 | * writing the registers for shutdown and polling (call | |
2792 | * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially | |
2793 | * just aborted and we will wait on those contexts, and given | |
2794 | * there was no indication of how live the controller is on the | |
2795 | * link, don't send more io to create more contexts for the | |
2796 | * shutdown. Let the controller fail via keepalive failure if | |
2797 | * it's still present. | |
2798 | */ | |
2799 | ||
2800 | /* | |
2801 | * clean up the admin queue. Same thing as above. | |
2802 | * use blk_mq_tagset_busy_iter() and the transport routine to | |
2803 | * terminate the exchanges. | |
2804 | */ | |
44c6ec77 JS |
2805 | if (ctrl->ctrl.state != NVME_CTRL_NEW) |
2806 | blk_mq_quiesce_queue(ctrl->ctrl.admin_q); | |
61bff8ef JS |
2807 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, |
2808 | nvme_fc_terminate_exchange, &ctrl->ctrl); | |
2809 | ||
2810 | /* kill the aens as they are a separate path */ | |
2811 | nvme_fc_abort_aen_ops(ctrl); | |
2812 | ||
2813 | /* wait for all io that had to be aborted */ | |
8a82dbf1 | 2814 | spin_lock_irq(&ctrl->lock); |
36715cf4 | 2815 | wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); |
61bff8ef | 2816 | ctrl->flags &= ~FCCTRL_TERMIO; |
8a82dbf1 | 2817 | spin_unlock_irq(&ctrl->lock); |
61bff8ef JS |
2818 | |
2819 | nvme_fc_term_aen_ops(ctrl); | |
2820 | ||
2821 | /* | |
2822 | * send a Disconnect(association) LS to fc-nvme target | |
2823 | * Note: could have been sent at top of process, but | |
2824 | * cleaner on link traffic if after the aborts complete. | |
2825 | * Note: if association doesn't exist, association_id will be 0 | |
2826 | */ | |
2827 | if (ctrl->association_id) | |
2828 | nvme_fc_xmt_disconnect_assoc(ctrl); | |
2829 | ||
2830 | if (ctrl->ctrl.tagset) { | |
2831 | nvme_fc_delete_hw_io_queues(ctrl); | |
2832 | nvme_fc_free_io_queues(ctrl); | |
2833 | } | |
2834 | ||
2835 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); | |
2836 | nvme_fc_free_queue(&ctrl->queues[0]); | |
158bfb88 | 2837 | |
d625d05e JS |
2838 | /* re-enable the admin_q so anything new can fast fail */ |
2839 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); | |
2840 | ||
158bfb88 | 2841 | nvme_fc_ctlr_inactive_on_rport(ctrl); |
61bff8ef JS |
2842 | } |
2843 | ||
2844 | static void | |
c5017e85 | 2845 | nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) |
61bff8ef | 2846 | { |
c5017e85 | 2847 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); |
61bff8ef | 2848 | |
61bff8ef | 2849 | cancel_delayed_work_sync(&ctrl->connect_work); |
61bff8ef JS |
2850 | /* |
2851 | * kill the association on the link side. this will block | |
2852 | * waiting for io to terminate | |
2853 | */ | |
2854 | nvme_fc_delete_association(ctrl); | |
0fd997d3 JS |
2855 | |
2856 | /* resume the io queues so that things will fast fail */ | |
2857 | nvme_start_queues(nctrl); | |
61bff8ef JS |
2858 | } |
2859 | ||
5bbecdbc JS |
2860 | static void |
2861 | nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) | |
2862 | { | |
2b632970 JS |
2863 | struct nvme_fc_rport *rport = ctrl->rport; |
2864 | struct nvme_fc_remote_port *portptr = &rport->remoteport; | |
2865 | unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; | |
2866 | bool recon = true; | |
5bbecdbc | 2867 | |
ad6a0a52 | 2868 | if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) |
5bbecdbc | 2869 | return; |
5bbecdbc | 2870 | |
2b632970 | 2871 | if (portptr->port_state == FC_OBJSTATE_ONLINE) |
5bbecdbc | 2872 | dev_info(ctrl->ctrl.device, |
2b632970 JS |
2873 | "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", |
2874 | ctrl->cnum, status); | |
2875 | else if (time_after_eq(jiffies, rport->dev_loss_end)) | |
2876 | recon = false; | |
5bbecdbc | 2877 | |
2b632970 JS |
2878 | if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { |
2879 | if (portptr->port_state == FC_OBJSTATE_ONLINE) | |
2880 | dev_info(ctrl->ctrl.device, | |
2881 | "NVME-FC{%d}: Reconnect attempt in %ld " | |
2882 | "seconds\n", | |
2883 | ctrl->cnum, recon_delay / HZ); | |
2884 | else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) | |
2885 | recon_delay = rport->dev_loss_end - jiffies; | |
96e24801 | 2886 | |
2b632970 | 2887 | queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); |
5bbecdbc | 2888 | } else { |
2b632970 JS |
2889 | if (portptr->port_state == FC_OBJSTATE_ONLINE) |
2890 | dev_warn(ctrl->ctrl.device, | |
5bbecdbc JS |
2891 | "NVME-FC{%d}: Max reconnect attempts (%d) " |
2892 | "reached. Removing controller\n", | |
fdf9dfa8 | 2893 | ctrl->cnum, ctrl->ctrl.nr_reconnects); |
2b632970 JS |
2894 | else |
2895 | dev_warn(ctrl->ctrl.device, | |
2896 | "NVME-FC{%d}: dev_loss_tmo (%d) expired " | |
2897 | "while waiting for remoteport connectivity. " | |
2898 | "Removing controller\n", ctrl->cnum, | |
2899 | portptr->dev_loss_tmo); | |
c5017e85 | 2900 | WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); |
5bbecdbc JS |
2901 | } |
2902 | } | |
2903 | ||
61bff8ef JS |
2904 | static void |
2905 | nvme_fc_reset_ctrl_work(struct work_struct *work) | |
2906 | { | |
2907 | struct nvme_fc_ctrl *ctrl = | |
d86c4d8e | 2908 | container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); |
61bff8ef JS |
2909 | int ret; |
2910 | ||
d09f2b45 | 2911 | nvme_stop_ctrl(&ctrl->ctrl); |
44c6ec77 | 2912 | |
61bff8ef JS |
2913 | /* will block while waiting for io to terminate */ | |
2914 | nvme_fc_delete_association(ctrl); | |
2915 | ||
ad6a0a52 | 2916 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { |
44c6ec77 JS |
2917 | dev_err(ctrl->ctrl.device, |
2918 | "NVME-FC{%d}: error_recovery: Couldn't change state " | |
ad6a0a52 | 2919 | "to CONNECTING\n", ctrl->cnum); |
44c6ec77 JS |
2920 | return; |
2921 | } | |
2922 | ||
2b632970 | 2923 | if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) |
96e24801 | 2924 | ret = nvme_fc_create_association(ctrl); |
2b632970 JS |
2925 | else |
2926 | ret = -ENOTCONN; | |
2927 | ||
5bbecdbc JS |
2928 | if (ret) |
2929 | nvme_fc_reconnect_or_delete(ctrl, ret); | |
2930 | else | |
61bff8ef | 2931 | dev_info(ctrl->ctrl.device, |
2b632970 JS |
2932 | "NVME-FC{%d}: controller reset complete\n", |
2933 | ctrl->cnum); | |
61bff8ef JS |
2934 | } |
2935 | ||
61bff8ef JS |
2936 | static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { |
2937 | .name = "fc", | |
2938 | .module = THIS_MODULE, | |
d3d5b87d | 2939 | .flags = NVME_F_FABRICS, |
61bff8ef JS |
2940 | .reg_read32 = nvmf_reg_read32, |
2941 | .reg_read64 = nvmf_reg_read64, | |
2942 | .reg_write32 = nvmf_reg_write32, | |
61bff8ef JS |
2943 | .free_ctrl = nvme_fc_nvme_ctrl_freed, |
2944 | .submit_async_event = nvme_fc_submit_async_event, | |
c5017e85 | 2945 | .delete_ctrl = nvme_fc_delete_ctrl, |
61bff8ef | 2946 | .get_address = nvmf_get_address, |
31b84460 | 2947 | .reinit_request = nvme_fc_reinit_request, |
61bff8ef JS |
2948 | }; |
2949 | ||
2950 | static void | |
2951 | nvme_fc_connect_ctrl_work(struct work_struct *work) | |
2952 | { | |
2953 | int ret; | |
2954 | ||
2955 | struct nvme_fc_ctrl *ctrl = | |
2956 | container_of(to_delayed_work(work), | |
2957 | struct nvme_fc_ctrl, connect_work); | |
2958 | ||
2959 | ret = nvme_fc_create_association(ctrl); | |
5bbecdbc JS |
2960 | if (ret) |
2961 | nvme_fc_reconnect_or_delete(ctrl, ret); | |
2962 | else | |
61bff8ef JS |
2963 | dev_info(ctrl->ctrl.device, |
2964 | "NVME-FC{%d}: controller reconnect complete\n", | |
2965 | ctrl->cnum); | |
2966 | } | |
2967 | ||
2968 | ||
2969 | static const struct blk_mq_ops nvme_fc_admin_mq_ops = { | |
2970 | .queue_rq = nvme_fc_queue_rq, | |
2971 | .complete = nvme_fc_complete_rq, | |
76f983cb | 2972 | .init_request = nvme_fc_init_request, |
61bff8ef | 2973 | .exit_request = nvme_fc_exit_request, |
61bff8ef JS |
2974 | .init_hctx = nvme_fc_init_admin_hctx, |
2975 | .timeout = nvme_fc_timeout, | |
2976 | }; | |
2977 | ||
e399441d | 2978 | |
56d5f4f1 JS |
2979 | /* |
2980 | * Fails a controller request if it matches an existing controller | |
2981 | * (association) with the same tuple: | |
2982 | * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> | |
2983 | * | |
2984 | * The ports don't need to be compared as they are intrinsically | |
2985 | * already matched by the port pointers supplied. | |
2986 | */ | |
2987 | static bool | |
2988 | nvme_fc_existing_controller(struct nvme_fc_rport *rport, | |
2989 | struct nvmf_ctrl_options *opts) | |
2990 | { | |
2991 | struct nvme_fc_ctrl *ctrl; | |
2992 | unsigned long flags; | |
2993 | bool found = false; | |
2994 | ||
2995 | spin_lock_irqsave(&rport->lock, flags); | |
2996 | list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { | |
2997 | found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); | |
2998 | if (found) | |
2999 | break; | |
3000 | } | |
3001 | spin_unlock_irqrestore(&rport->lock, flags); | |
3002 | ||
3003 | return found; | |
3004 | } | |
3005 | ||
e399441d | 3006 | static struct nvme_ctrl * |
61bff8ef | 3007 | nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, |
e399441d JS |
3008 | struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) |
3009 | { | |
3010 | struct nvme_fc_ctrl *ctrl; | |
3011 | unsigned long flags; | |
17c4dc6e | 3012 | int ret, idx, retry; |
e399441d | 3013 | |
85e6a6ad JS |
3014 | if (!(rport->remoteport.port_role & |
3015 | (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { | |
3016 | ret = -EBADR; | |
3017 | goto out_fail; | |
3018 | } | |
3019 | ||
56d5f4f1 JS |
3020 | if (!opts->duplicate_connect && |
3021 | nvme_fc_existing_controller(rport, opts)) { | |
3022 | ret = -EALREADY; | |
3023 | goto out_fail; | |
3024 | } | |
3025 | ||
e399441d JS |
3026 | ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); |
3027 | if (!ctrl) { | |
3028 | ret = -ENOMEM; | |
3029 | goto out_fail; | |
3030 | } | |
3031 | ||
3032 | idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); | |
3033 | if (idx < 0) { | |
3034 | ret = -ENOSPC; | |
3035 | goto out_free_ctrl; | |
3036 | } | |
3037 | ||
3038 | ctrl->ctrl.opts = opts; | |
3039 | INIT_LIST_HEAD(&ctrl->ctrl_list); | |
e399441d JS |
3040 | ctrl->lport = lport; |
3041 | ctrl->rport = rport; | |
3042 | ctrl->dev = lport->dev; | |
e399441d | 3043 | ctrl->cnum = idx; |
158bfb88 | 3044 | ctrl->assoc_active = false; |
8a82dbf1 | 3045 | init_waitqueue_head(&ctrl->ioabort_wait); |
e399441d | 3046 | |
e399441d JS |
3047 | get_device(ctrl->dev); |
3048 | kref_init(&ctrl->ref); | |
3049 | ||
d86c4d8e | 3050 | INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); |
61bff8ef | 3051 | INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); |
e399441d JS |
3052 | spin_lock_init(&ctrl->lock); |
3053 | ||
3054 | /* io queue count */ | |
d858e5f0 | 3055 | ctrl->ctrl.queue_count = min_t(unsigned int, |
e399441d JS |
3056 | opts->nr_io_queues, |
3057 | lport->ops->max_hw_queues); | |
d858e5f0 | 3058 | ctrl->ctrl.queue_count++; /* +1 for admin queue */ |
e399441d JS |
3059 | |
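/* NVMe sqsize is a zero's-based value, hence the queue_size - 1 below */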
3060 | ctrl->ctrl.sqsize = opts->queue_size - 1; | |
3061 | ctrl->ctrl.kato = opts->kato; | |
3062 | ||
3063 | ret = -ENOMEM; | |
d858e5f0 SG |
3064 | ctrl->queues = kcalloc(ctrl->ctrl.queue_count, |
3065 | sizeof(struct nvme_fc_queue), GFP_KERNEL); | |
e399441d | 3066 | if (!ctrl->queues) |
61bff8ef | 3067 | goto out_free_ida; |
e399441d | 3068 | |
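/*
 * cmd_size below sizes the per-request private data: the nvme_fc_fcp_op,
 * an inline scatterlist of SG_CHUNK_SIZE entries, and the LLDD's private
 * per-request area (fcprqst_priv_sz).
 */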
61bff8ef JS |
3069 | memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); |
3070 | ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; | |
38dabe21 | 3071 | ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; |
61bff8ef JS |
3072 | ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ |
3073 | ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; | |
3074 | ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) + | |
3075 | (SG_CHUNK_SIZE * | |
3076 | sizeof(struct scatterlist)) + | |
3077 | ctrl->lport->ops->fcprqst_priv_sz; | |
3078 | ctrl->admin_tag_set.driver_data = ctrl; | |
3079 | ctrl->admin_tag_set.nr_hw_queues = 1; | |
3080 | ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; | |
5a22e2bf | 3081 | ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; |
e399441d | 3082 | |
61bff8ef | 3083 | ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); |
e399441d | 3084 | if (ret) |
61bff8ef | 3085 | goto out_free_queues; |
34b6c231 | 3086 | ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; |
e399441d | 3087 | |
61bff8ef JS |
3088 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); |
3089 | if (IS_ERR(ctrl->ctrl.admin_q)) { | |
3090 | ret = PTR_ERR(ctrl->ctrl.admin_q); | |
3091 | goto out_free_admin_tag_set; | |
e399441d JS |
3092 | } |
3093 | ||
61bff8ef JS |
3094 | /* |
3095 | * Would have been nice to init the io queue tag set as well. |
3096 | * However, we require interaction from the controller | |
3097 | * for max io queue count before we can do so. | |
3098 | * Defer this to the connect path. | |
3099 | */ | |
e399441d | 3100 | |
61bff8ef JS |
3101 | ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); |
3102 | if (ret) | |
3103 | goto out_cleanup_admin_q; | |
e399441d | 3104 | |
61bff8ef | 3105 | /* at this point, teardown path changes to ref counting on nvme ctrl */ |
e399441d JS |
3106 | |
3107 | spin_lock_irqsave(&rport->lock, flags); | |
3108 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); | |
3109 | spin_unlock_irqrestore(&rport->lock, flags); | |
3110 | ||
17c4dc6e JS |
3111 | /* |
3112 | * It's possible that transactions used to create the association | |
3113 | * may fail. Examples: CreateAssociation LS or CreateIOConnection | |
3114 | * LS gets dropped/corrupted/fails; or a frame gets dropped or a | |
3115 | * command times out for one of the actions to init the controller | |
3116 | * (Connect, Get/Set_Property, Set_Features, etc). Many of these | |
3117 | * transport errors (frame drop, LS failure) inherently must kill | |
3118 | * the association. The transport is coded so that any command used | |
3119 | * to create the association (prior to a LIVE state transition | |
ad6a0a52 | 3120 | * while NEW or CONNECTING) will fail if it completes in error or |
17c4dc6e JS |
3121 | * times out. |
3122 | * | |
3123 | * As such: since the connect request was most likely triggered |
3124 | * by a udev event that discovered the remote port, and there is |
3125 | * no admin or script waiting to restart a failed connect, retry |
3126 | * the initial connection creation up to three times before |
3127 | * giving up and declaring failure. |
3128 | */ | |
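/* note: the retries below are issued back-to-back, with no delay between attempts */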
3129 | for (retry = 0; retry < 3; retry++) { | |
3130 | ret = nvme_fc_create_association(ctrl); | |
3131 | if (!ret) | |
3132 | break; | |
3133 | } | |
3134 | ||
61bff8ef | 3135 | if (ret) { |
17c4dc6e JS |
3136 | /* initial connect failed even after retries - fail out */ |
3137 | dev_err(ctrl->ctrl.device, | |
3138 | "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum); | |
3139 | ||
de41447a | 3140 | ctrl->ctrl.opts = NULL; |
17c4dc6e | 3141 | |
61bff8ef JS |
3142 | /* initiate nvme ctrl ref counting teardown */ |
3143 | nvme_uninit_ctrl(&ctrl->ctrl); | |
61bff8ef | 3144 | |
0b5a7669 JS |
3145 | /* Remove core ctrl ref. */ |
3146 | nvme_put_ctrl(&ctrl->ctrl); | |
3147 | ||
61bff8ef JS |
3148 | /* as we're past the point where we transition to the ref |
3149 | * counting teardown path, if we return a bad pointer here, | |
3150 | * the calling routine, thinking it's prior to the | |
3151 | * transition, will do an rport put. Since the teardown | |
3152 | * path also does an rport put, we do an extra get here so |
3153 | * that proper order/teardown happens. |
3154 | */ | |
3155 | nvme_fc_rport_get(rport); | |
3156 | ||
3157 | if (ret > 0) | |
3158 | ret = -EIO; | |
3159 | return ERR_PTR(ret); | |
e399441d JS |
3160 | } |
3161 | ||
d22524a4 | 3162 | nvme_get_ctrl(&ctrl->ctrl); |
2cb657bc | 3163 | |
61bff8ef JS |
3164 | dev_info(ctrl->ctrl.device, |
3165 | "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", | |
3166 | ctrl->cnum, ctrl->ctrl.opts->subsysnqn); | |
e399441d | 3167 | |
61bff8ef | 3168 | return &ctrl->ctrl; |
e399441d | 3169 | |
61bff8ef JS |
3170 | out_cleanup_admin_q: |
3171 | blk_cleanup_queue(ctrl->ctrl.admin_q); | |
3172 | out_free_admin_tag_set: | |
3173 | blk_mq_free_tag_set(&ctrl->admin_tag_set); | |
3174 | out_free_queues: | |
3175 | kfree(ctrl->queues); | |
e399441d | 3176 | out_free_ida: |
61bff8ef | 3177 | put_device(ctrl->dev); |
e399441d JS |
3178 | ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); |
3179 | out_free_ctrl: | |
3180 | kfree(ctrl); | |
3181 | out_fail: | |
e399441d JS |
3182 | /* exits via this label do not follow the ctrl refcount teardown path */ |
3183 | return ERR_PTR(ret); | |
3184 | } | |
3185 | ||
e399441d JS |
3186 | |
3187 | struct nvmet_fc_traddr { | |
3188 | u64 nn; | |
3189 | u64 pn; | |
3190 | }; | |
3191 | ||
e399441d | 3192 | static int |
9c5358e1 | 3193 | __nvme_fc_parse_u64(substring_t *sstr, u64 *val) |
e399441d | 3194 | { |
e399441d JS |
3195 | u64 token64; |
3196 | ||
9c5358e1 JS |
3197 | if (match_u64(sstr, &token64)) |
3198 | return -EINVAL; | |
3199 | *val = token64; | |
e399441d | 3200 | |
9c5358e1 JS |
3201 | return 0; |
3202 | } | |
e399441d | 3203 | |
9c5358e1 JS |
3204 | /* |
3205 | * This routine validates and extracts the WWNs from the TRADDR string. |
3206 | * As the kernel parsers need the 0x prefix to determine the number base, |
3207 | * always build the string to parse with a 0x prefix before parsing the names. |
3208 | */ | |
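/*
 * Illustrative examples only (the WWN values shown are made up); the two
 * accepted forms, typically separated by a ':', are:
 *   nn-0x20000090fa942779:pn-0x10000090fa942779   (0x-prefixed, max length)
 *   nn-20000090fa942779:pn-10000090fa942779       (bare hex, min length)
 */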
3209 | static int | |
3210 | nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) | |
3211 | { | |
3212 | char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; | |
3213 | substring_t wwn = { name, &name[sizeof(name)-1] }; | |
3214 | int nnoffset, pnoffset; | |
3215 | ||
3216 | /* validate the string is in one of the 2 allowed formats */ |
3217 | if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && | |
3218 | !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && | |
3219 | !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], | |
3220 | "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { | |
3221 | nnoffset = NVME_FC_TRADDR_OXNNLEN; | |
3222 | pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + | |
3223 | NVME_FC_TRADDR_OXNNLEN; | |
3224 | } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && | |
3225 | !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && | |
3226 | !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], | |
3227 | "pn-", NVME_FC_TRADDR_NNLEN))) { | |
3228 | nnoffset = NVME_FC_TRADDR_NNLEN; | |
3229 | pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; | |
3230 | } else | |
3231 | goto out_einval; | |
e399441d | 3232 | |
9c5358e1 JS |
3233 | name[0] = '0'; |
3234 | name[1] = 'x'; | |
3235 | name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; | |
3236 | ||
3237 | memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); | |
3238 | if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) | |
3239 | goto out_einval; | |
3240 | ||
3241 | memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); | |
3242 | if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) | |
3243 | goto out_einval; | |
3244 | ||
3245 | return 0; | |
3246 | ||
3247 | out_einval: | |
3248 | pr_warn("%s: bad traddr string\n", __func__); | |
3249 | return -EINVAL; | |
e399441d JS |
3250 | } |
3251 | ||
3252 | static struct nvme_ctrl * | |
3253 | nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) | |
3254 | { | |
3255 | struct nvme_fc_lport *lport; | |
3256 | struct nvme_fc_rport *rport; | |
61bff8ef | 3257 | struct nvme_ctrl *ctrl; |
e399441d JS |
3258 | struct nvmet_fc_traddr laddr = { 0L, 0L }; |
3259 | struct nvmet_fc_traddr raddr = { 0L, 0L }; | |
3260 | unsigned long flags; | |
3261 | int ret; | |
3262 | ||
9c5358e1 | 3263 | ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); |
e399441d JS |
3264 | if (ret || !raddr.nn || !raddr.pn) |
3265 | return ERR_PTR(-EINVAL); | |
3266 | ||
9c5358e1 | 3267 | ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); |
e399441d JS |
3268 | if (ret || !laddr.nn || !laddr.pn) |
3269 | return ERR_PTR(-EINVAL); | |
3270 | ||
3271 | /* find the host and remote ports to connect together */ | |
3272 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
3273 | list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { | |
3274 | if (lport->localport.node_name != laddr.nn || | |
3275 | lport->localport.port_name != laddr.pn) | |
3276 | continue; | |
3277 | ||
3278 | list_for_each_entry(rport, &lport->endp_list, endp_list) { | |
3279 | if (rport->remoteport.node_name != raddr.nn || | |
3280 | rport->remoteport.port_name != raddr.pn) | |
3281 | continue; | |
3282 | ||
3283 | /* if we fail to get a reference, fall through; the lookup will end in error */ |
3284 | if (!nvme_fc_rport_get(rport)) | |
3285 | break; | |
3286 | ||
3287 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
3288 | ||
61bff8ef JS |
3289 | ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); |
3290 | if (IS_ERR(ctrl)) | |
3291 | nvme_fc_rport_put(rport); | |
3292 | return ctrl; | |
e399441d JS |
3293 | } |
3294 | } | |
3295 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
3296 | ||
3297 | return ERR_PTR(-ENOENT); | |
3298 | } | |
3299 | ||
3300 | ||
3301 | static struct nvmf_transport_ops nvme_fc_transport = { | |
3302 | .name = "fc", | |
0de5cd36 | 3303 | .module = THIS_MODULE, |
e399441d | 3304 | .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, |
5bbecdbc | 3305 | .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, |
e399441d JS |
3306 | .create_ctrl = nvme_fc_create_ctrl, |
3307 | }; | |
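/*
 * Illustrative connect string as written to /dev/nvme-fabrics (values are
 * placeholders; other options elided):
 *   nqn=<subsys nqn>,transport=fc,traddr=nn-0x...:pn-0x...,host_traddr=nn-0x...:pn-0x...
 */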
3308 | ||
3309 | static int __init nvme_fc_init_module(void) | |
3310 | { | |
5f568556 JS |
3311 | int ret; |
3312 | ||
3313 | /* | |
3314 | * NOTE: | |
3315 | * It is expected that in the future the kernel will combine | |
3316 | * the FC-isms that are currently under scsi and now being | |
3317 | * added to by NVME into a new standalone FC class. The SCSI | |
3318 | * and NVME protocols and their devices would be under this | |
3319 | * new FC class. | |
3320 | * | |
3321 | * As we need something to post FC-specific udev events to, | |
3322 | * specifically for nvme probe events, start by creating the | |
3323 | * new device class. When the new standalone FC class is | |
3324 | * put in place, this code will move to a more generic | |
3325 | * location for the class. | |
3326 | */ | |
3327 | fc_class = class_create(THIS_MODULE, "fc"); | |
3328 | if (IS_ERR(fc_class)) { | |
3329 | pr_err("couldn't register class fc\n"); | |
3330 | return PTR_ERR(fc_class); | |
3331 | } | |
3332 | ||
3333 | /* | |
3334 | * Create a device for the FC-centric udev events | |
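 * (expected to appear in sysfs as /sys/class/fc/fc_udev_device)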
3335 | */ | |
3336 | fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL, | |
3337 | "fc_udev_device"); | |
3338 | if (IS_ERR(fc_udev_device)) { | |
3339 | pr_err("couldn't create fc_udev device!\n"); | |
3340 | ret = PTR_ERR(fc_udev_device); | |
3341 | goto out_destroy_class; | |
3342 | } | |
3343 | ||
3344 | ret = nvmf_register_transport(&nvme_fc_transport); | |
3345 | if (ret) | |
3346 | goto out_destroy_device; | |
3347 | ||
3348 | return 0; | |
3349 | ||
3350 | out_destroy_device: | |
3351 | device_destroy(fc_class, MKDEV(0, 0)); | |
3352 | out_destroy_class: | |
3353 | class_destroy(fc_class); | |
3354 | return ret; | |
e399441d JS |
3355 | } |
3356 | ||
3357 | static void __exit nvme_fc_exit_module(void) | |
3358 | { | |
3359 | /* sanity check - all lports should be removed */ | |
3360 | if (!list_empty(&nvme_fc_lport_list)) | |
3361 | pr_warn("%s: localport list not empty\n", __func__); | |
3362 | ||
3363 | nvmf_unregister_transport(&nvme_fc_transport); | |
3364 | ||
e399441d JS |
3365 | ida_destroy(&nvme_fc_local_port_cnt); |
3366 | ida_destroy(&nvme_fc_ctrl_cnt); | |
5f568556 JS |
3367 | |
3368 | device_destroy(fc_class, MKDEV(0, 0)); | |
3369 | class_destroy(fc_class); | |
e399441d JS |
3370 | } |
3371 | ||
3372 | module_init(nvme_fc_init_module); | |
3373 | module_exit(nvme_fc_exit_module); | |
3374 | ||
3375 | MODULE_LICENSE("GPL v2"); |