scsi: qla2xxx: Adjust IOCB resource on qpair create
drivers/scsi/qla2xxx/qla_nvme.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
    struct qla_nvme_rport *rport;
    struct nvme_fc_port_info req;
    int ret;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return 0;

    if (!vha->flags.nvme_enabled) {
        ql_log(ql_log_info, vha, 0x2100,
            "%s: Not registering target since Host NVME is not enabled\n",
            __func__);
        return 0;
    }

    if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
        return 0;

    if (!(fcport->nvme_prli_service_param &
        (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
        (fcport->nvme_flag & NVME_FLAG_REGISTERED))
        return 0;

    fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

    memset(&req, 0, sizeof(struct nvme_fc_port_info));
    req.port_name = wwn_to_u64(fcport->port_name);
    req.node_name = wwn_to_u64(fcport->node_name);
    req.port_role = 0;
    req.dev_loss_tmo = fcport->dev_loss_tmo;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
        req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
        req.port_role |= FC_PORT_ROLE_NVME_TARGET;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
        req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

    req.port_id = fcport->d_id.b24;

    ql_log(ql_log_info, vha, 0x2102,
        "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
        __func__, req.node_name, req.port_name,
        req.port_id);

    ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
        &fcport->nvme_remote_port);
    if (ret) {
        ql_log(ql_log_warn, vha, 0x212e,
            "Failed to register remote port. Transport returned %d\n",
            ret);
        return ret;
    }

    nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
        fcport->dev_loss_tmo);

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
        ql_log(ql_log_info, vha, 0x212a,
            "PortID:%06x Supports SLER\n", req.port_id);

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
        ql_log(ql_log_info, vha, 0x212b,
            "PortID:%06x Supports PI control\n", req.port_id);

    rport = fcport->nvme_remote_port->private;
    rport->fcport = fcport;

    fcport->nvme_flag |= NVME_FLAG_REGISTERED;
    return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha;
    struct qla_qpair *qpair;

    /* Map admin queue and 1st IO queue to index 0 */
    if (qidx)
        qidx--;

    vha = (struct scsi_qla_host *)lport->private;
    ha = vha->hw;

    ql_log(ql_log_info, vha, 0x2104,
        "%s: handle %p, idx=%d, qsize %d\n",
        __func__, handle, qidx, qsize);

    if (qidx > qla_nvme_fc_transport.max_hw_queues) {
        ql_log(ql_log_warn, vha, 0x212f,
            "%s: Illegal qidx=%d. Max=%d\n",
            __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
        return -EINVAL;
    }

    /* Use base qpair if max_qpairs is 0 */
    if (!ha->max_qpairs) {
        qpair = ha->base_qpair;
    } else {
        if (ha->queue_pair_map[qidx]) {
            *handle = ha->queue_pair_map[qidx];
            ql_log(ql_log_info, vha, 0x2121,
                "Returning existing qpair of %p for idx=%x\n",
                *handle, qidx);
            return 0;
        }

        qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
        if (!qpair) {
            ql_log(ql_log_warn, vha, 0x2122,
                "Failed to allocate qpair\n");
            return -EINVAL;
        }
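        /*
         * A qpair was created after initial resource allocation;
         * recompute the per-qpair IOCB limits so the firmware
         * IOCB/exchange budget is redistributed across all active
         * queue pairs (see "Adjust IOCB resource on qpair create").
         */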
        qla_adjust_iocb_limit(vha);
    }
    *handle = qpair;

    return 0;
}
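
/*
 * Lifetime of an NVMe command/LS srb: sp->cmd_kref starts at 1 when the
 * request is queued, and the abort path takes an extra reference with
 * kref_get_unless_zero() before scheduling abort_work.  When the last
 * reference drops, one of the release callbacks below runs: it severs
 * the sp <-> priv links under priv->cmd_lock, completes the request to
 * the FC-NVMe transport via fd->done(), and returns the srb to its pool.
 */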
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
    struct srb *sp = container_of(kref, struct srb, cmd_kref);
    struct nvme_private *priv = (struct nvme_private *)sp->priv;
    struct nvmefc_fcp_req *fd;
    struct srb_iocb *nvme;
    unsigned long flags;

    if (!priv)
        goto out;

    nvme = &sp->u.iocb_cmd;
    fd = nvme->u.nvme.desc;

    spin_lock_irqsave(&priv->cmd_lock, flags);
    priv->sp = NULL;
    sp->priv = NULL;
    if (priv->comp_status == QLA_SUCCESS) {
        fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
        fd->status = NVME_SC_SUCCESS;
    } else {
        fd->rcv_rsplen = 0;
        fd->transferred_length = 0;
        fd->status = NVME_SC_INTERNAL;
    }
    spin_unlock_irqrestore(&priv->cmd_lock, flags);

    fd->done(fd);
out:
    qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
    struct srb *sp = container_of(kref, struct srb, cmd_kref);
    struct nvme_private *priv = (struct nvme_private *)sp->priv;
    struct nvmefc_ls_req *fd;
    unsigned long flags;

    if (!priv)
        goto out;

    spin_lock_irqsave(&priv->cmd_lock, flags);
    priv->sp = NULL;
    sp->priv = NULL;
    spin_unlock_irqrestore(&priv->cmd_lock, flags);

    fd = priv->fd;

    fd->done(fd, priv->comp_status);
out:
    qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
    struct nvme_private *priv =
        container_of(work, struct nvme_private, ls_work);

    kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
    struct nvme_private *priv = sp->priv;

    if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
        return;

    if (res)
        res = -EINVAL;

    priv->comp_status = res;
    INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
    schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
    struct nvme_private *priv = sp->priv;

    priv->comp_status = res;
    kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);

    return;
}
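
/*
 * Aborts are issued from a workqueue.  The caller (ls_abort/fcp_abort)
 * holds an extra cmd_kref reference taken under priv->cmd_lock, which
 * abort_work drops on exit unless it must stay held until the firmware
 * reports ABTS completion; in that case qla24xx_abort_sp_done drops it.
 */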
static void qla_nvme_abort_work(struct work_struct *work)
{
    struct nvme_private *priv =
        container_of(work, struct nvme_private, abort_work);
    srb_t *sp = priv->sp;
    fc_port_t *fcport = sp->fcport;
    struct qla_hw_data *ha = fcport->vha->hw;
    int rval, abts_done_called = 1;
    bool io_wait_for_abort_done;
    uint32_t handle;

    ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
        "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
        __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc,
        fcport->deleted);

    if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
        goto out;

    if (ha->flags.host_shutting_down) {
        ql_log(ql_log_info, sp->fcport->vha, 0xffff,
            "%s Calling done on sp: %p, type: 0x%x\n",
            __func__, sp, sp->type);
        sp->done(sp, 0);
        goto out;
    }

    /*
     * sp may not be valid after abort_command() if the return code is
     * either SUCCESS or ERR_FROM_FW, so cache the values needed here.
     */
    io_wait_for_abort_done = ql2xabts_wait_nvme &&
        QLA_ABTS_WAIT_ENABLED(sp);
    handle = sp->handle;

    rval = ha->isp_ops->abort_command(sp);

    ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
        "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
        __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
        sp, handle, fcport, rval);

    /*
     * If async tmf is enabled, the abort callback is called only on
     * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
     */
    if (ql2xasynctmfenable &&
        rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
        abts_done_called = 0;

    /*
     * Return before decreasing the kref so that I/O requests wait
     * until the ABTS completes; that reference is dropped in
     * qla24xx_abort_sp_done().
     */
    if (abts_done_called && io_wait_for_abort_done)
        return;
out:
    /* kref_get was done before the work was scheduled. */
    kref_put(&sp->cmd_kref, sp->put_fn);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
    struct nvme_private *priv = fd->private;
    unsigned long flags;

    spin_lock_irqsave(&priv->cmd_lock, flags);
    if (!priv->sp) {
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
        return;
    }

    if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
        return;
    }
    spin_unlock_irqrestore(&priv->cmd_lock, flags);

    INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
    schedule_work(&priv->abort_work);
}

static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
    struct qla_nvme_rport *qla_rport = rport->private;
    fc_port_t *fcport = qla_rport->fcport;
    struct srb_iocb *nvme;
    struct nvme_private *priv = fd->private;
    struct scsi_qla_host *vha;
    int rval = QLA_FUNCTION_FAILED;
    struct qla_hw_data *ha;
    srb_t *sp;

    if (!fcport || fcport->deleted)
        return rval;

    vha = fcport->vha;
    ha = vha->hw;

    if (!ha->flags.fw_started)
        return rval;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
    if (!sp)
        return rval;

    sp->type = SRB_NVME_LS;
    sp->name = "nvme_ls";
    sp->done = qla_nvme_sp_ls_done;
    sp->put_fn = qla_nvme_release_ls_cmd_kref;
    sp->priv = priv;
    priv->sp = sp;
    kref_init(&sp->cmd_kref);
    spin_lock_init(&priv->cmd_lock);
    nvme = &sp->u.iocb_cmd;
    priv->fd = fd;
    nvme->u.nvme.desc = fd;
    nvme->u.nvme.dir = 0;
    nvme->u.nvme.dl = 0;
    nvme->u.nvme.cmd_len = fd->rqstlen;
    nvme->u.nvme.rsp_len = fd->rsplen;
    nvme->u.nvme.rsp_dma = fd->rspdma;
    nvme->u.nvme.timeout_sec = fd->timeout;
    nvme->u.nvme.cmd_dma = fd->rqstdma;
    dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
        fd->rqstlen, DMA_TO_DEVICE);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x700e,
            "qla2x00_start_sp failed = %d\n", rval);
        sp->priv = NULL;
        priv->sp = NULL;
        qla2x00_rel_sp(sp);
        return rval;
    }

    return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
    struct nvme_private *priv = fd->private;
    unsigned long flags;

    spin_lock_irqsave(&priv->cmd_lock, flags);
    if (!priv->sp) {
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
        return;
    }
    if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
        return;
    }
    spin_unlock_irqrestore(&priv->cmd_lock, flags);

    INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
    schedule_work(&priv->abort_work);
}

static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t handle;
    struct cmd_nvme *cmd_pkt;
    uint16_t cnt, i;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    uint16_t avail_dsds;
    struct dsd64 *cur_dsd;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_qpair *qpair = sp->qpair;
    struct srb_iocb *nvme = &sp->u.iocb_cmd;
    struct scatterlist *sgl, *sg;
    struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
    struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
    uint32_t rval = QLA_SUCCESS;

    /* Setup qpair pointers */
    req = qpair->req;
    rsp = qpair->rsp;
    tot_dsds = fd->sg_cnt;

    /* Acquire qpair specific lock */
    spin_lock_irqsave(&qpair->qp_lock, flags);

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0) {
        rval = -EBUSY;
        goto queuing_error;
    }
    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
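
    /*
     * Reserve firmware IOCB and exchange resources for this command
     * up front; they are returned via qla_put_fw_resources() on any
     * queuing error below.
     */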
    sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
    sp->iores.exch_cnt = 1;
    sp->iores.iocb_cnt = req_cnt;
    if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
        rval = -EBUSY;
        goto queuing_error;
    }
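
    /*
     * If the cached free-slot count looks too small, re-read the
     * consumer index (shadow copy in host memory when supported,
     * hardware register otherwise) and recompute how many request
     * ring entries are actually free; req_cnt + 2 keeps a small
     * safety margin.
     */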
    if (req->cnt < (req_cnt + 2)) {
        if (IS_SHADOW_REG_CAPABLE(ha)) {
            cnt = *req->out_ptr;
        } else {
            cnt = rd_reg_dword_relaxed(req->req_q_out);
            if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
                rval = -EBUSY;
                goto queuing_error;
            }
        }

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length - (req->ring_index - cnt);

        if (req->cnt < (req_cnt + 2)) {
            rval = -EBUSY;
            goto queuing_error;
        }
    }

    if (unlikely(!fd->sqid)) {
        if (cmd->sqe.common.opcode == nvme_admin_async_event) {
            nvme->u.nvme.aen_op = 1;
            atomic_inc(&ha->nvme_active_aen_cnt);
        }
    }

    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    req->cnt -= req_cnt;

    cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
    cmd_pkt->handle = make_handle(req->id, handle);

    /* Zero out remaining portion of packet. */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    cmd_pkt->entry_status = 0;

    /* Update entry type to indicate Command NVME IOCB */
    cmd_pkt->entry_type = COMMAND_NVME;

    /* No data transfer - how do we check buffer len == 0?? */
    if (fd->io_dir == NVMEFC_FCP_READ) {
        cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
        qpair->counters.input_bytes += fd->payload_length;
        qpair->counters.input_requests++;
    } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
        cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
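        /*
         * First burst: if the target granted it in PRLI and the I/O
         * fits within the negotiated first-burst size (0 means no
         * limit), let the firmware send write data immediately
         * without waiting for a transfer-ready from the target.
         */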
        if ((vha->flags.nvme_first_burst) &&
            (sp->fcport->nvme_prli_service_param &
             NVME_PRLI_SP_FIRST_BURST)) {
            if ((fd->payload_length <=
                sp->fcport->nvme_first_burst_size) ||
                (sp->fcport->nvme_first_burst_size == 0))
                cmd_pkt->control_flags |=
                    cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
        }
        qpair->counters.output_bytes += fd->payload_length;
        qpair->counters.output_requests++;
    } else if (fd->io_dir == 0) {
        cmd_pkt->control_flags = 0;
    }

    if (sp->fcport->edif.enable && fd->io_dir != 0)
        cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

    /* Set BIT_13 of control flags for Async event */
    if (vha->flags.nvme2_enabled &&
        cmd->sqe.common.opcode == nvme_admin_async_event) {
        cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
    }

    /* Set NPORT-ID */
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    /* NVME RSP IU */
    cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
    put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

    /* NVME CMND IU */
    cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
    cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
    cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

    /* One DSD is available in the Command Type NVME IOCB */
    avail_dsds = 1;
    cur_dsd = &cmd_pkt->nvme_dsd;
    sgl = fd->first_sgl;

    /* Load data segments */
    for_each_sg(sgl, sg, tot_dsds, i) {
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */

            /* Adjust ring index */
            req->ring_index++;
            if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
            } else {
                req->ring_ptr++;
            }
            cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
            put_unaligned_le32(CONTINUE_A64_TYPE,
                &cont_pkt->entry_type);

            cur_dsd = cont_pkt->dsd;
            avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
        }

        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }

    /* Set total entry count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    /* Ignore nvme async cmd due to long timeout */
    if (!nvme->u.nvme.aen_op)
        sp->qpair->cmd_cnt++;

    /* Set chip new ring index. */
    wrt_reg_dword(req->req_q_in, req->ring_index);

    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

queuing_error:
    if (rval)
        qla_put_fw_resources(sp->qpair, &sp->iores);
    spin_unlock_irqrestore(&qpair->qp_lock, flags);

    return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
    fc_port_t *fcport;
    struct srb_iocb *nvme;
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha;
    int rval;
    srb_t *sp;
    struct qla_qpair *qpair = hw_queue_handle;
    struct nvme_private *priv = fd->private;
    struct qla_nvme_rport *qla_rport = rport->private;

    if (!priv) {
        /* nvme association has been torn down */
        return -ENODEV;
    }

    fcport = qla_rport->fcport;

    if (unlikely(!qpair || !fcport || fcport->deleted))
        return -EBUSY;

    if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
        return -ENODEV;

    vha = fcport->vha;
    ha = vha->hw;

    if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
        return -EBUSY;

    /*
     * If we know the dev is going away while the transport is still
     * sending IO's return busy back to stall the IO Q.  This happens
     * when the link goes away and fw hasn't notified us yet, but IO's
     * are being returned.  If the dev comes back quickly we won't
     * exhaust the IO retry count at the core.
     */
    if (fcport->nvme_flag & NVME_FLAG_RESETTING)
        return -EBUSY;

    qpair = qla_mapq_nvme_select_qpair(ha, qpair);

    /* Alloc SRB structure */
    sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
    if (!sp)
        return -EBUSY;
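
    /*
     * Cross-link the srb and the transport request's private data;
     * both pointers are cleared under priv->cmd_lock when the command
     * completes or is torn down, so the abort path can safely detect
     * a command that has already finished.
     */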
    kref_init(&sp->cmd_kref);
    spin_lock_init(&priv->cmd_lock);
    sp->priv = priv;
    priv->sp = sp;
    sp->type = SRB_NVME_CMD;
    sp->name = "nvme_cmd";
    sp->done = qla_nvme_sp_done;
    sp->put_fn = qla_nvme_release_fcp_cmd_kref;
    sp->qpair = qpair;
    sp->vha = vha;
    sp->cmd_sp = sp;
    nvme = &sp->u.iocb_cmd;
    nvme->u.nvme.desc = fd;

    rval = qla2x00_start_nvme_mq(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x212d,
            "qla2x00_start_nvme_mq failed = %d\n", rval);
        sp->priv = NULL;
        priv->sp = NULL;
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
    }

    return rval;
}

static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
    struct blk_mq_queue_map *map)
{
    struct scsi_qla_host *vha = lport->private;

    blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
    struct scsi_qla_host *vha = lport->private;

    ql_log(ql_log_info, vha, 0x210f,
        "localport delete of %p completed.\n", vha->nvme_local_port);
    vha->nvme_local_port = NULL;
    complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
    fc_port_t *fcport;
    struct qla_nvme_rport *qla_rport = rport->private;

    fcport = qla_rport->fcport;
    fcport->nvme_remote_port = NULL;
    fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
    fcport->nvme_flag &= ~NVME_FLAG_DELETING;
    ql_log(ql_log_info, fcport->vha, 0x2110,
        "remoteport_delete of %p %8phN completed.\n",
        fcport, fcport->port_name);
    complete(&fcport->nvme_del_done);
}
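
/*
 * Callbacks and sizing handed to the FC-NVMe transport for this HBA.
 * The *_priv_sz fields tell the transport how much private space to
 * allocate alongside its own objects for the driver's rport, LS and
 * FCP request bookkeeping.
 */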
static struct nvme_fc_port_template qla_nvme_fc_transport = {
    .localport_delete = qla_nvme_localport_delete,
    .remoteport_delete = qla_nvme_remoteport_delete,
    .create_queue = qla_nvme_alloc_queue,
    .delete_queue = NULL,
    .ls_req = qla_nvme_ls_req,
    .ls_abort = qla_nvme_ls_abort,
    .fcp_io = qla_nvme_post_cmd,
    .fcp_abort = qla_nvme_fcp_abort,
    .map_queues = qla_nvme_map_queues,
    .max_hw_queues = DEF_NVME_HW_QUEUES,
    .max_sgl_segments = 1024,
    .max_dif_sgl_segments = 64,
    .dma_boundary = 0xFFFFFFFF,
    .local_priv_sz = 8,
    .remote_priv_sz = sizeof(struct qla_nvme_rport),
    .lsrqst_priv_sz = sizeof(struct nvme_private),
    .fcprqst_priv_sz = sizeof(struct nvme_private),
};

void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
    int ret;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return;

    ql_log(ql_log_warn, fcport->vha, 0x2112,
        "%s: unregister remoteport on %p %8phN\n",
        __func__, fcport, fcport->port_name);
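
    /*
     * On driver unload, drop the remote port's dev_loss_tmo to zero so
     * the FC-NVMe transport tears the association down immediately
     * instead of waiting out the devloss timer.
     */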
    if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
        nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

    init_completion(&fcport->nvme_del_done);
    ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
    if (ret)
        ql_log(ql_log_info, fcport->vha, 0x2114,
            "%s: Failed to unregister nvme_remote_port (%d)\n",
            __func__, ret);
    wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
    int nv_ret;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return;

    if (vha->nvme_local_port) {
        init_completion(&vha->nvme_del_done);
        ql_log(ql_log_info, vha, 0x2116,
            "unregister localport=%p\n",
            vha->nvme_local_port);
        nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
        if (nv_ret)
            ql_log(ql_log_info, vha, 0x2115,
                "Unregister of localport failed\n");
        else
            wait_for_completion(&vha->nvme_del_done);
    }
}

int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
    struct nvme_fc_port_template *tmpl;
    struct qla_hw_data *ha;
    struct nvme_fc_port_info pinfo;
    int ret = -EINVAL;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return ret;

    ha = vha->hw;
    tmpl = &qla_nvme_fc_transport;

    if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
        ql_log(ql_log_warn, vha, 0xfffd,
            "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to: %d\n",
            ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
        ql2xnvme_queues = DEF_NVME_HW_QUEUES;
    } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
        ql_log(ql_log_warn, vha, 0xfffd,
            "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
            ql2xnvme_queues, (ha->max_qpairs - 1),
            (ha->max_qpairs - 1));
        ql2xnvme_queues = ha->max_qpairs - 1;
    }
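
    /*
     * The transport gets at most ql2xnvme_queues hardware queues, and
     * never more than the qpairs the HBA can actually back (one is
     * reserved for the base qpair), i.e.
     * max_hw_queues = min(ql2xnvme_queues, max_qpairs - 1).
     */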
    qla_nvme_fc_transport.max_hw_queues =
        min((uint8_t)(ql2xnvme_queues),
            (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));

    ql_log(ql_log_info, vha, 0xfffb,
        "Number of NVME queues used for this port: %d\n",
        qla_nvme_fc_transport.max_hw_queues);

    pinfo.node_name = wwn_to_u64(vha->node_name);
    pinfo.port_name = wwn_to_u64(vha->port_name);
    pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
    pinfo.port_id = vha->d_id.b24;

    mutex_lock(&ha->vport_lock);
    /*
     * Check again for nvme_local_port to see if any other thread raced
     * with this one and finished registration.
     */
    if (!vha->nvme_local_port) {
        ql_log(ql_log_info, vha, 0xffff,
            "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
            pinfo.node_name, pinfo.port_name, pinfo.port_id);
        qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

        ret = nvme_fc_register_localport(&pinfo, tmpl,
            get_device(&ha->pdev->dev),
            &vha->nvme_local_port);
        mutex_unlock(&ha->vport_lock);
    } else {
        mutex_unlock(&ha->vport_lock);
        return 0;
    }
    if (ret) {
        ql_log(ql_log_warn, vha, 0xffff,
            "register_localport failed: ret=%x\n", ret);
    } else {
        vha->nvme_local_port->private = vha;
    }

    return ret;
}

void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
    struct qla_hw_data *ha;

    if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
        return;

    ha = orig_sp->fcport->vha->hw;

    WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
    /* Use Driver Specified Retry Count */
    abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
    abt->drv.abts_rty_cnt = cpu_to_le16(2);
    /* Use specified response timeout */
    abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
    /* set it to 2 * r_a_tov in secs */
    abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
    u16 comp_status;
    struct scsi_qla_host *vha;

    if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
        return;

    vha = orig_sp->fcport->vha;

    comp_status = le16_to_cpu(abt->comp_status);
    switch (comp_status) {
    case CS_RESET: /* reset event aborted */
    case CS_ABORTED: /* IOCB was cleaned */
    case CS_TIMEOUT:
    /* N_Port handle is not currently logged in */
    case CS_PORT_UNAVAILABLE:
    /* N_Port handle was logged out while waiting for ABTS to complete */
    case CS_PORT_LOGGED_OUT:
    /* Firmware found that the port name changed */
    case CS_PORT_CONFIG_CHG:
        ql_dbg(ql_dbg_async, vha, 0xf09d,
            "Abort I/O IOCB completed with error, comp_status=%x\n",
            comp_status);
        break;

    /* BA_RJT was received for the ABTS */
    case CS_REJECT_RECEIVED:
        ql_dbg(ql_dbg_async, vha, 0xf09e,
            "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
            abt->fw.ba_rjt_vendorUnique);
        ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
            "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
            abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
        break;

    case CS_COMPLETE:
        ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
            "IOCB request is completed successfully comp_status=%x\n",
            comp_status);
        break;

    case CS_IOCB_ERROR:
        ql_dbg(ql_dbg_async, vha, 0xf0a0,
            "IOCB request is failed, comp_status=%x\n", comp_status);
        break;

    default:
        ql_dbg(ql_dbg_async, vha, 0xf0a1,
            "Invalid Abort IO IOCB Completion Status %x\n",
            comp_status);
        break;
    }
}
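
/*
 * Drop the extra reference the abort path took once ABTS handling is
 * complete, allowing the command's release callback to run.
 */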
inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
    if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
        return;
    kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}