scsi: qla2xxx: Enable type checking for the SRB free and done callback functions
drivers/scsi/qla2xxx/qla_nvme.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

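/*
 * SRB lifetime for NVMe requests: each command/LS SRB carries a kref
 * (sp->cmd_kref). The release callbacks below run when the last
 * reference is dropped; they detach the SRB from the transport request,
 * complete the request back to the FC-NVMe transport and return the SRB
 * to its pool.
 */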
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
	}
	fd->status = 0;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);

	return;
}

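/*
 * Abort path: qla_nvme_ls_abort() and qla_nvme_fcp_abort() take an
 * extra reference on the SRB under priv->cmd_lock and schedule this
 * work item, which asks the firmware to abort the command and then
 * drops that reference again.
 */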
e473b307 209static void qla_nvme_abort_work(struct work_struct *work)
e84067d7 210{
e473b307
DT
211 struct nvme_private *priv =
212 container_of(work, struct nvme_private, abort_work);
e84067d7 213 srb_t *sp = priv->sp;
e473b307 214 fc_port_t *fcport = sp->fcport;
e84067d7 215 struct qla_hw_data *ha = fcport->vha->hw;
e473b307 216 int rval;
e84067d7 217
dafbe56f
BVA
218 ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
219 "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
220 __func__, sp, sp->handle, fcport, fcport->deleted);
471f8e03 221
53be100b 222 if (!ha->flags.fw_started && fcport->deleted)
4c2a2d01 223 goto out;
471f8e03 224
b2d1453a
GM
225 if (ha->flags.host_shutting_down) {
226 ql_log(ql_log_info, sp->fcport->vha, 0xffff,
227 "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
228 __func__, sp, sp->type, atomic_read(&sp->ref_count));
229 sp->done(sp, 0);
4c2a2d01 230 goto out;
b2d1453a
GM
231 }
232
e84067d7 233 rval = ha->isp_ops->abort_command(sp);
e84067d7
DG
234
235 ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
870fe24f
DT
236 "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
237 __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
238 sp, sp->handle, fcport, rval);
4c2a2d01
QT
239
240out:
241 /* kref_get was done before work was schedule. */
242 kref_put(&sp->cmd_kref, sp->put_fn);
e84067d7
DG
243}
244
e473b307
DT
245static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
246 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
247{
248 struct nvme_private *priv = fd->private;
4c2a2d01
QT
249 unsigned long flags;
250
251 spin_lock_irqsave(&priv->cmd_lock, flags);
252 if (!priv->sp) {
253 spin_unlock_irqrestore(&priv->cmd_lock, flags);
254 return;
255 }
256
257 if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
258 spin_unlock_irqrestore(&priv->cmd_lock, flags);
259 return;
260 }
261 spin_unlock_irqrestore(&priv->cmd_lock, flags);
e473b307
DT
262
263 INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
264 schedule_work(&priv->abort_work);
265}
266
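/*
 * LS (link service) request: allocate an SRB, DMA-map the request
 * payload and hand it to the firmware via qla2x00_start_sp(). The
 * completion is reported through qla_nvme_sp_ls_done(), which finishes
 * the request from a work item.
 */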
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || (fcport && fcport->deleted))
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = (void *)priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

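/*
 * Build and ring a Command Type NVME IOCB on the qpair's request queue.
 * The IOCB itself carries one DSD; additional scatter/gather entries go
 * into Continuation Type 1 IOCBs (five DSDs each) taken from the same
 * ring.
 */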
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = CF_READ_DATA;
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = CF_WRITE_DATA;
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					CF_NVME_FIRST_BURST_ENABLE;
		}
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CNMD IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;

	if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
	    (fcport && fcport->deleted))
		return rval;

	vha = fcport->vha;
	/*
	 * If we know the device is going away while the transport is still
	 * sending I/Os, return busy back to stall the I/O queue. This happens
	 * when the link goes away and the firmware hasn't notified us yet,
	 * but I/Os are still being returned. If the device comes back quickly
	 * we won't exhaust the I/O retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = (void *)priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue   = NULL,
	.ls_req         = qla_nvme_ls_req,
	.ls_abort       = qla_nvme_ls_abort,
	.fcp_io         = qla_nvme_post_cmd,
	.fcp_abort      = qla_nvme_fcp_abort,
	.max_hw_queues  = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

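/*
 * Teardown helpers: unregistration is asynchronous in the FC-NVMe
 * transport, so both routines below wait on a completion that is fired
 * from the corresponding *_delete() callback above.
 */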
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

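/*
 * Register this host with the FC-NVMe transport. max_hw_queues is
 * clamped to ha->max_req_queues - 2 so the transport never asks for
 * more hardware queues than the HBA provides; on success the
 * localport's private pointer is set back to the scsi_qla_host.
 */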
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}