/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"
45 static int ql2xtgt_tape_enable;
46 module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
47 MODULE_PARM_DESC(ql2xtgt_tape_enable,
48 "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
        "Determines when initiator mode will be enabled. Possible values: "
        "\"exclusive\" - initiator mode will be enabled on load, "
        "disabled on enabling target mode and then enabled again "
        "when target mode is disabled; "
        "\"disabled\" - initiator mode will never be enabled; "
        "\"enabled\" (default) - initiator mode will always stay enabled.");
60 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
62 static int temp_sam_status = SAM_STAT_BUSY;
/* From scsi/fc/fc_fcp.h */
enum fcp_resp_rsp_codes {
        FCP_TMF_CMPL = 0,
        FCP_DATA_LEN_INVALID = 1,
        FCP_CMND_FIELDS_INVALID = 2,
        FCP_DATA_PARAM_MISMATCH = 3,
        FCP_TMF_REJECTED = 4,
        FCP_CMND_LEN_INVALID = 5,
        FCP_TMF_INVALID_LUN = 9,
};
/* fc_pri_ta from scsi/fc/fc_fcp.h */
80 #define FCP_PTA_SIMPLE 0 /* simple task attribute */
81 #define FCP_PTA_HEADQ 1 /* head of queue task attribute */
82 #define FCP_PTA_ORDERED 2 /* ordered task attribute */
83 #define FCP_PTA_ACA 4 /* auto. contingent allegiance */
84 #define FCP_PTA_MASK 7 /* mask for task attribute field */
85 #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
86 #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time those
 * functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
101 /* Predefs for callbacks handed to qla2xxx LLD */
102 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
103 struct atio_from_isp *pkt);
104 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
105 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
106 int fn, void *iocb, int flags);
107 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
108 *cmd, struct atio_from_isp *atio, int ha_locked);
109 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
110 struct qla_tgt_srr_imm *imm, int ha_lock);
111 static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
112 struct qla_tgt_cmd *cmd);
113 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull);
115 static void qlt_disable_vha(struct scsi_qla_host *vha);
116 static void qlt_clear_tgt_db(struct qla_tgt *tgt);
117 static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *ntfy,
119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
124 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
125 static mempool_t *qla_tgt_mgmt_cmd_mempool;
126 static struct workqueue_struct *qla_tgt_wq;
127 static DEFINE_MUTEX(qla_tgt_mutex);
128 static LIST_HEAD(qla_tgt_glist);
130 /* This API intentionally takes dest as a parameter, rather than returning
131 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
        scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
        *dest = atomic_inc_return(&base_vha->generation_tick);
        wmb();
}
140 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
141 static struct qla_tgt_sess *qlt_find_sess_by_port_name(
143 const uint8_t *port_name)
145 struct qla_tgt_sess *sess;
147 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
148 if (!memcmp(sess->port_name, port_name, WWN_SIZE))
/* Might release hw lock, then reacquire!! */
156 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
158 /* Send marker if required */
159 if (unlikely(vha->marker_needed != 0)) {
160 int rc = qla2x00_issue_marker(vha, vha_locked);
161 if (rc != QLA_SUCCESS) {
162 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
163 "qla_target(%d): issue_marker() failed\n",
172 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
175 struct qla_hw_data *ha = vha->hw;
178 if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
181 if (vha->d_id.b.al_pa == d_id[2])
184 BUG_ON(ha->tgt.tgt_vp_map == NULL);
185 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
186 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
187 return ha->tgt.tgt_vp_map[vp_idx].vha;
193 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
196 struct qla_hw_data *ha = vha->hw;
198 if (vha->vp_idx == vp_idx)
201 BUG_ON(ha->tgt.tgt_vp_map == NULL);
202 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
203 return ha->tgt.tgt_vp_map[vp_idx].vha;
208 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
212 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
214 vha->hw->tgt.num_pend_cmds++;
215 if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
216 vha->hw->qla_stats.stat_max_pend_cmds =
217 vha->hw->tgt.num_pend_cmds;
218 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
220 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
224 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
225 vha->hw->tgt.num_pend_cmds--;
226 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
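/*
 * Route an incoming ATIO to the scsi_qla_host that owns it: ATIO_TYPE7
 * entries are matched by the d_id in the FCP header, immediate notifies
 * by their vp_index, and the packet is then handed to qlt_24xx_atio_pkt().
 */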
229 static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
230 struct atio_from_isp *atio)
232 ql_dbg(ql_dbg_tgt, vha, 0xe072,
233 "%s: qla_target(%d): type %x ox_id %04x\n",
234 __func__, vha->vp_idx, atio->u.raw.entry_type,
235 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
237 switch (atio->u.raw.entry_type) {
240 struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
241 atio->u.isp24.fcp_hdr.d_id);
242 if (unlikely(NULL == host)) {
243 ql_dbg(ql_dbg_tgt, vha, 0xe03e,
244 "qla_target(%d): Received ATIO_TYPE7 "
245 "with unknown d_id %x:%x:%x\n", vha->vp_idx,
246 atio->u.isp24.fcp_hdr.d_id[0],
247 atio->u.isp24.fcp_hdr.d_id[1],
248 atio->u.isp24.fcp_hdr.d_id[2]);
251 qlt_24xx_atio_pkt(host, atio);
255 case IMMED_NOTIFY_TYPE:
257 struct scsi_qla_host *host = vha;
258 struct imm_ntfy_from_isp *entry =
259 (struct imm_ntfy_from_isp *)atio;
261 if ((entry->u.isp24.vp_index != 0xFF) &&
262 (entry->u.isp24.nport_handle != 0xFFFF)) {
263 host = qlt_find_host_by_vp_idx(vha,
264 entry->u.isp24.vp_index);
265 if (unlikely(!host)) {
266 ql_dbg(ql_dbg_tgt, vha, 0xe03f,
267 "qla_target(%d): Received "
268 "ATIO (IMMED_NOTIFY_TYPE) "
269 "with unknown vp_index %d\n",
270 vha->vp_idx, entry->u.isp24.vp_index);
274 qlt_24xx_atio_pkt(host, atio);
279 ql_dbg(ql_dbg_tgt, vha, 0xe040,
280 "qla_target(%d): Received unknown ATIO atio "
281 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
288 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
290 switch (pkt->entry_type) {
292 ql_dbg(ql_dbg_tgt, vha, 0xe073,
293 "qla_target(%d):%s: CRC2 Response pkt\n",
294 vha->vp_idx, __func__);
297 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
298 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
300 if (unlikely(!host)) {
301 ql_dbg(ql_dbg_tgt, vha, 0xe041,
302 "qla_target(%d): Response pkt (CTIO_TYPE7) "
303 "received, with unknown vp_index %d\n",
304 vha->vp_idx, entry->vp_index);
307 qlt_response_pkt(host, pkt);
311 case IMMED_NOTIFY_TYPE:
313 struct scsi_qla_host *host = vha;
314 struct imm_ntfy_from_isp *entry =
315 (struct imm_ntfy_from_isp *)pkt;
317 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
318 if (unlikely(!host)) {
319 ql_dbg(ql_dbg_tgt, vha, 0xe042,
320 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
321 "received, with unknown vp_index %d\n",
322 vha->vp_idx, entry->u.isp24.vp_index);
325 qlt_response_pkt(host, pkt);
329 case NOTIFY_ACK_TYPE:
331 struct scsi_qla_host *host = vha;
332 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
334 if (0xFF != entry->u.isp24.vp_index) {
335 host = qlt_find_host_by_vp_idx(vha,
336 entry->u.isp24.vp_index);
337 if (unlikely(!host)) {
338 ql_dbg(ql_dbg_tgt, vha, 0xe043,
339 "qla_target(%d): Response "
340 "pkt (NOTIFY_ACK_TYPE) "
341 "received, with unknown "
342 "vp_index %d\n", vha->vp_idx,
343 entry->u.isp24.vp_index);
347 qlt_response_pkt(host, pkt);
353 struct abts_recv_from_24xx *entry =
354 (struct abts_recv_from_24xx *)pkt;
355 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
357 if (unlikely(!host)) {
358 ql_dbg(ql_dbg_tgt, vha, 0xe044,
359 "qla_target(%d): Response pkt "
360 "(ABTS_RECV_24XX) received, with unknown "
361 "vp_index %d\n", vha->vp_idx, entry->vp_index);
364 qlt_response_pkt(host, pkt);
370 struct abts_resp_to_24xx *entry =
371 (struct abts_resp_to_24xx *)pkt;
372 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
374 if (unlikely(!host)) {
375 ql_dbg(ql_dbg_tgt, vha, 0xe045,
376 "qla_target(%d): Response pkt "
377 "(ABTS_RECV_24XX) received, with unknown "
378 "vp_index %d\n", vha->vp_idx, entry->vp_index);
381 qlt_response_pkt(host, pkt);
386 qlt_response_pkt(vha, pkt);
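/*
 * Workqueue half of session teardown: optionally kick off an async LOGO
 * for the port, release the se_session through tgt_ops->free_session(),
 * wait for the LOGO to complete, ack a pending PLOGI if needed, then unlink
 * the session and wake up anyone waiting for sess_count to reach zero.
 */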
392 static void qlt_free_session_done(struct work_struct *work)
394 struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
396 struct qla_tgt *tgt = sess->tgt;
397 struct scsi_qla_host *vha = sess->vha;
398 struct qla_hw_data *ha = vha->hw;
400 bool logout_started = false;
403 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
404 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
405 " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
406 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
407 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
408 sess->logout_on_delete, sess->keep_nport_handle,
409 sess->plogi_ack_needed);
413 if (sess->logout_on_delete) {
416 memset(&fcport, 0, sizeof(fcport));
417 fcport.loop_id = sess->loop_id;
418 fcport.d_id = sess->s_id;
419 memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
421 fcport.tgt_session = sess;
423 rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
424 if (rc != QLA_SUCCESS)
425 ql_log(ql_log_warn, vha, 0xf085,
426 "Schedule logo failed sess %p rc %d\n",
429 logout_started = true;
433 * Release the target session for FC Nexus from fabric module code.
435 if (sess->se_sess != NULL)
436 ha->tgt.tgt_ops->free_session(sess);
438 if (logout_started) {
441 while (!ACCESS_ONCE(sess->logout_completed)) {
443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
444 "%s: waiting for sess %p logout\n",
451 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
452 "%s: sess %p logout completed\n",
456 spin_lock_irqsave(&ha->hardware_lock, flags);
458 if (sess->plogi_ack_needed)
459 qlt_send_notify_ack(vha, &sess->tm_iocb,
462 list_del(&sess->sess_list_entry);
464 spin_unlock_irqrestore(&ha->hardware_lock, flags);
466 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
467 "Unregistration of sess %p finished\n", sess);
        /* We need to protect against race, when tgt is freed before or
         * inside wake_up() */
        tgt->sess_count--;
        if (tgt->sess_count == 0)
476 wake_up_all(&tgt->waitQ);
479 /* ha->hardware_lock supposed to be held on entry */
480 void qlt_unreg_sess(struct qla_tgt_sess *sess)
482 struct scsi_qla_host *vha = sess->vha;
484 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
486 if (!list_empty(&sess->del_list_entry))
487 list_del_init(&sess->del_list_entry);
488 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
490 INIT_WORK(&sess->free_work, qlt_free_session_done);
491 schedule_work(&sess->free_work);
493 EXPORT_SYMBOL(qlt_unreg_sess);
495 /* ha->hardware_lock supposed to be held on entry */
496 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
498 struct qla_hw_data *ha = vha->hw;
499 struct qla_tgt_sess *sess = NULL;
500 uint32_t unpacked_lun, lun = 0;
503 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
504 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
506 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
507 if (loop_id == 0xFFFF) {
509 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
510 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
511 #if 0 /* FIXME: do we need to choose a session here? */
512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
514 typeof(*sess), sess_list_entry);
516 case QLA_TGT_NEXUS_LOSS_SESS:
517 mcmd = QLA_TGT_NEXUS_LOSS;
519 case QLA_TGT_ABORT_ALL_SESS:
520 mcmd = QLA_TGT_ABORT_ALL;
522 case QLA_TGT_NEXUS_LOSS:
523 case QLA_TGT_ABORT_ALL:
526 ql_dbg(ql_dbg_tgt, vha, 0xe046,
527 "qla_target(%d): Not allowed "
528 "command %x in %s", vha->vp_idx,
537 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
540 ql_dbg(ql_dbg_tgt, vha, 0xe000,
541 "Using sess for qla_tgt_reset: %p\n", sess);
547 ql_dbg(ql_dbg_tgt, vha, 0xe047,
548 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
549 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
552 lun = a->u.isp24.fcp_cmnd.lun;
553 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
555 return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
556 iocb, QLA24XX_MGMT_SEND_NACK);
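/*
 * Put a session on tgt->del_sess_list for delayed deletion.  Immediate
 * requests go to the head of the list and the work is kicked right away;
 * otherwise the entry expires after port_down_retry_count + 5 seconds
 * and the delayed work is scheduled for that deadline.
 */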
559 /* ha->hardware_lock supposed to be held on entry */
560 static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
563 struct qla_tgt *tgt = sess->tgt;
564 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
567 /* Upgrade to unconditional deletion in case it was temporary */
568 if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
569 list_del(&sess->del_list_entry);
574 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
575 "Scheduling sess %p for deletion\n", sess);
579 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
580 list_add(&sess->del_list_entry, &tgt->del_sess_list);
582 sess->deleted = QLA_SESS_DELETION_PENDING;
583 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
586 sess->expires = jiffies + dev_loss_tmo * HZ;
588 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
589 "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
590 " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
591 sess->vha->vp_idx, sess->port_name, sess->loop_id,
592 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
593 dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
597 mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
599 schedule_delayed_work(&tgt->sess_del_work,
600 sess->expires - jiffies);
603 /* ha->hardware_lock supposed to be held on entry */
604 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
606 struct qla_tgt_sess *sess;
608 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
609 qlt_schedule_sess_for_deletion(sess, true);
611 /* At this point tgt could be already dead */
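/*
 * Translate an FC S_ID into the firmware loop ID by pulling the logged-in
 * port database into a DMA buffer (qla2x00_get_id_list()) and scanning it
 * for a matching domain/area/al_pa.
 */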
614 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
617 struct qla_hw_data *ha = vha->hw;
618 dma_addr_t gid_list_dma;
619 struct gid_list_info *gid_list;
624 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
625 &gid_list_dma, GFP_KERNEL);
627 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
628 "qla_target(%d): DMA Alloc failed of %u\n",
629 vha->vp_idx, qla2x00_gid_list_size(ha));
633 /* Get list of logged in devices */
634 rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
635 if (rc != QLA_SUCCESS) {
636 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
637 "qla_target(%d): get_id_list() failed: %x\n",
640 goto out_free_id_list;
643 id_iter = (char *)gid_list;
645 for (i = 0; i < entries; i++) {
646 struct gid_list_info *gid = (struct gid_list_info *)id_iter;
647 if ((gid->al_pa == s_id[2]) &&
648 (gid->area == s_id[1]) &&
649 (gid->domain == s_id[0])) {
650 *loop_id = le16_to_cpu(gid->loop_id);
654 id_iter += ha->gid_list_info_size;
658 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
659 gid_list, gid_list_dma);
663 /* ha->hardware_lock supposed to be held on entry */
664 static void qlt_undelete_sess(struct qla_tgt_sess *sess)
666 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
668 list_del_init(&sess->del_list_entry);
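/*
 * Delayed-work handler for the deletion list: tear down every session whose
 * dev_loss timer has expired (shutdown_sess + put_sess) and re-arm the work
 * for the first entry that has not expired yet.
 */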
672 static void qlt_del_sess_work_fn(struct delayed_work *work)
674 struct qla_tgt *tgt = container_of(work, struct qla_tgt,
676 struct scsi_qla_host *vha = tgt->vha;
677 struct qla_hw_data *ha = vha->hw;
678 struct qla_tgt_sess *sess;
679 unsigned long flags, elapsed;
681 spin_lock_irqsave(&ha->hardware_lock, flags);
682 while (!list_empty(&tgt->del_sess_list)) {
683 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
686 if (time_after_eq(elapsed, sess->expires)) {
687 /* No turning back */
688 list_del_init(&sess->del_list_entry);
689 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
691 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
692 "Timeout: sess %p about to be deleted\n",
694 ha->tgt.tgt_ops->shutdown_sess(sess);
695 ha->tgt.tgt_ops->put_sess(sess);
697 schedule_delayed_work(&tgt->sess_del_work,
698 sess->expires - elapsed);
702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Adds an extra ref to allow dropping the hw lock after adding sess to the list.
 * Caller must put it.
 */
709 static struct qla_tgt_sess *qlt_create_sess(
710 struct scsi_qla_host *vha,
714 struct qla_hw_data *ha = vha->hw;
715 struct qla_tgt_sess *sess;
717 unsigned char be_sid[3];
719 /* Check to avoid double sessions */
720 spin_lock_irqsave(&ha->hardware_lock, flags);
721 list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
723 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
724 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
725 "Double sess %p found (s_id %x:%x:%x, "
726 "loop_id %d), updating to d_id %x:%x:%x, "
727 "loop_id %d", sess, sess->s_id.b.domain,
728 sess->s_id.b.al_pa, sess->s_id.b.area,
729 sess->loop_id, fcport->d_id.b.domain,
730 fcport->d_id.b.al_pa, fcport->d_id.b.area,
733 /* Cannot undelete at this point */
734 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
735 spin_unlock_irqrestore(&ha->hardware_lock,
741 qlt_undelete_sess(sess);
743 kref_get(&sess->se_sess->sess_kref);
744 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
745 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
747 if (sess->local && !local)
750 qlt_do_generation_tick(vha, &sess->generation);
752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
757 spin_unlock_irqrestore(&ha->hardware_lock, flags);
759 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
761 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
762 "qla_target(%u): session allocation failed, all commands "
763 "from port %8phC will be refused", vha->vp_idx,
768 sess->tgt = vha->vha_tgt.qla_tgt;
770 sess->s_id = fcport->d_id;
771 sess->loop_id = fcport->loop_id;
773 INIT_LIST_HEAD(&sess->del_list_entry);
775 /* Under normal circumstances we want to logout from firmware when
776 * session eventually ends and release corresponding nport handle.
777 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
778 * code will adjust these flags as necessary. */
779 sess->logout_on_delete = 1;
780 sess->keep_nport_handle = 0;
782 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
783 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
784 sess, vha->vha_tgt.qla_tgt);
786 be_sid[0] = sess->s_id.b.domain;
787 be_sid[1] = sess->s_id.b.area;
788 be_sid[2] = sess->s_id.b.al_pa;
        /*
         * Determine if this fc_port->port_name is allowed to access
         * target mode using explicit NodeACLs+MappedLUNs, or using
         * TPG demo mode. If this is successful a target mode FC nexus
         * is established.
         */
795 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
796 &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
801 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
802 * access across ->hardware_lock reaquire.
804 kref_get(&sess->se_sess->sess_kref);
806 sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
807 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
808 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
810 spin_lock_irqsave(&ha->hardware_lock, flags);
811 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
812 vha->vha_tgt.qla_tgt->sess_count++;
813 qlt_do_generation_tick(vha, &sess->generation);
814 spin_unlock_irqrestore(&ha->hardware_lock, flags);
816 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
817 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
818 "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
819 vha->vp_idx, local ? "local " : "", fcport->port_name,
820 fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
821 sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
827 * Called from qla2x00_reg_remote_port()
829 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
831 struct qla_hw_data *ha = vha->hw;
832 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
833 struct qla_tgt_sess *sess;
836 if (!vha->hw->tgt.tgt_ops)
839 if (!tgt || (fcport->port_type != FCT_INITIATOR))
842 if (qla_ini_mode_enabled(vha))
845 spin_lock_irqsave(&ha->hardware_lock, flags);
847 spin_unlock_irqrestore(&ha->hardware_lock, flags);
850 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
852 spin_unlock_irqrestore(&ha->hardware_lock, flags);
854 mutex_lock(&vha->vha_tgt.tgt_mutex);
855 sess = qlt_create_sess(vha, fcport, false);
856 mutex_unlock(&vha->vha_tgt.tgt_mutex);
858 spin_lock_irqsave(&ha->hardware_lock, flags);
859 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
860 /* Point of no return */
861 spin_unlock_irqrestore(&ha->hardware_lock, flags);
864 kref_get(&sess->se_sess->sess_kref);
867 qlt_undelete_sess(sess);
869 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
870 "qla_target(%u): %ssession for port %8phC "
871 "(loop ID %d) reappeared\n", vha->vp_idx,
872 sess->local ? "local " : "", sess->port_name,
875 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
876 "Reappeared sess %p\n", sess);
878 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
879 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
882 if (sess && sess->local) {
883 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
884 "qla_target(%u): local session for "
885 "port %8phC (loop ID %d) became global\n", vha->vp_idx,
886 fcport->port_name, sess->loop_id);
889 ha->tgt.tgt_ops->put_sess(sess);
890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * max_gen - specifies the maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
900 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
901 struct qla_tgt_sess *sess;
903 if (!vha->hw->tgt.tgt_ops)
912 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
917 if (max_gen - sess->generation < 0) {
918 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
919 "Ignoring stale deletion request for se_sess %p / sess %p"
920 " for port %8phC, req_gen %d, sess_gen %d\n",
921 sess->se_sess, sess, sess->port_name, max_gen,
926 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
929 qlt_schedule_sess_for_deletion(sess, false);
932 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
934 struct qla_hw_data *ha = tgt->ha;
        /* We need to protect against race, when tgt is freed before or
         * inside wake_up() */
941 spin_lock_irqsave(&ha->hardware_lock, flags);
942 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
943 "tgt %p, empty(sess_list)=%d sess_count=%d\n",
944 tgt, list_empty(&tgt->sess_list), tgt->sess_count);
945 res = (tgt->sess_count == 0);
946 spin_unlock_irqrestore(&ha->hardware_lock, flags);
951 /* Called by tcm_qla2xxx configfs code */
952 int qlt_stop_phase1(struct qla_tgt *tgt)
954 struct scsi_qla_host *vha = tgt->vha;
955 struct qla_hw_data *ha = tgt->ha;
958 mutex_lock(&qla_tgt_mutex);
959 if (!vha->fc_vport) {
960 struct Scsi_Host *sh = vha->host;
961 struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
964 spin_lock_irqsave(sh->host_lock, flags);
965 npiv_vports = (fc_host->npiv_vports_inuse);
966 spin_unlock_irqrestore(sh->host_lock, flags);
969 mutex_unlock(&qla_tgt_mutex);
973 if (tgt->tgt_stop || tgt->tgt_stopped) {
974 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
975 "Already in tgt->tgt_stop or tgt_stopped state\n");
976 mutex_unlock(&qla_tgt_mutex);
980 ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
983 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
984 * Lock is needed, because we still can get an incoming packet.
986 mutex_lock(&vha->vha_tgt.tgt_mutex);
987 spin_lock_irqsave(&ha->hardware_lock, flags);
989 qlt_clear_tgt_db(tgt);
990 spin_unlock_irqrestore(&ha->hardware_lock, flags);
991 mutex_unlock(&vha->vha_tgt.tgt_mutex);
992 mutex_unlock(&qla_tgt_mutex);
994 flush_delayed_work(&tgt->sess_del_work);
996 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
997 "Waiting for sess works (tgt %p)", tgt);
998 spin_lock_irqsave(&tgt->sess_work_lock, flags);
999 while (!list_empty(&tgt->sess_works_list)) {
1000 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1001 flush_scheduled_work();
1002 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1004 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1006 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
1007 "Waiting for tgt %p: list_empty(sess_list)=%d "
1008 "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
1011 wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
1014 if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
1015 qlt_disable_vha(vha);
1017 /* Wait for sessions to clear out (just in case) */
1018 wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
1021 EXPORT_SYMBOL(qlt_stop_phase1);
1023 /* Called by tcm_qla2xxx configfs code */
1024 void qlt_stop_phase2(struct qla_tgt *tgt)
1026 struct qla_hw_data *ha = tgt->ha;
1027 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1028 unsigned long flags;
1030 if (tgt->tgt_stopped) {
1031 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1032 "Already in tgt->tgt_stopped state\n");
1037 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1038 "Waiting for %d IRQ commands to complete (tgt %p)",
1039 tgt->irq_cmd_count, tgt);
1041 mutex_lock(&vha->vha_tgt.tgt_mutex);
1042 spin_lock_irqsave(&ha->hardware_lock, flags);
1043 while (tgt->irq_cmd_count != 0) {
1044 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1046 spin_lock_irqsave(&ha->hardware_lock, flags);
1049 tgt->tgt_stopped = 1;
1050 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1051 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1053 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
1056 EXPORT_SYMBOL(qlt_stop_phase2);
1058 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
1059 static void qlt_release(struct qla_tgt *tgt)
1061 scsi_qla_host_t *vha = tgt->vha;
1063 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
1064 qlt_stop_phase2(tgt);
1066 vha->vha_tgt.qla_tgt = NULL;
1068 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1069 "Release of tgt %p finished\n", tgt);
1074 /* ha->hardware_lock supposed to be held on entry */
1075 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1076 const void *param, unsigned int param_size)
1078 struct qla_tgt_sess_work_param *prm;
1079 unsigned long flags;
1081 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1083 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1084 "qla_target(%d): Unable to create session "
1085 "work, command will be refused", 0);
1089 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1090 "Scheduling work (type %d, prm %p)"
1091 " to find session for param %p (size %d, tgt %p)\n",
1092 type, prm, param, param_size, tgt);
1095 memcpy(&prm->tm_iocb, param, param_size);
1097 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1098 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1099 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1101 schedule_work(&tgt->sess_work);
/* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire */
1109 static void qlt_send_notify_ack(struct scsi_qla_host *vha,
1110 struct imm_ntfy_from_isp *ntfy,
1111 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
1112 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
1114 struct qla_hw_data *ha = vha->hw;
1116 struct nack_to_isp *nack;
1118 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1120 /* Send marker if required */
1121 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1124 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
1126 ql_dbg(ql_dbg_tgt, vha, 0xe049,
1127 "qla_target(%d): %s failed: unable to allocate "
1128 "request packet\n", vha->vp_idx, __func__);
1132 if (vha->vha_tgt.qla_tgt != NULL)
1133 vha->vha_tgt.qla_tgt->notify_ack_expected++;
1135 pkt->entry_type = NOTIFY_ACK_TYPE;
1136 pkt->entry_count = 1;
1138 nack = (struct nack_to_isp *)pkt;
1139 nack->ox_id = ntfy->ox_id;
1141 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
1142 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
1143 nack->u.isp24.flags = ntfy->u.isp24.flags &
1144 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
1146 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
1147 nack->u.isp24.status = ntfy->u.isp24.status;
1148 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
1149 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
1150 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
1151 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
1152 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
1153 nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
1154 nack->u.isp24.srr_reject_code = srr_reject_code;
1155 nack->u.isp24.srr_reject_code_expl = srr_explan;
1156 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
1158 ql_dbg(ql_dbg_tgt, vha, 0xe005,
1159 "qla_target(%d): Sending 24xx Notify Ack %d\n",
1160 vha->vp_idx, nack->u.isp24.status);
1162 /* Memory Barrier */
1164 qla2x00_start_iocbs(vha, vha->req);
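/*
 * Build an ABTS response IOCB for a received ABTS: the source/destination
 * IDs from the original header are copied back (swapped in the usual case),
 * the payload is a BA_ACC for FCP_TMF_CMPL and a BA_RJT otherwise, and
 * abts_resp_expected is bumped before the IOCB is queued.
 */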
/* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire */
1170 static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
1171 struct abts_recv_from_24xx *abts, uint32_t status,
1174 struct qla_hw_data *ha = vha->hw;
1175 struct abts_resp_to_24xx *resp;
1179 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1180 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1183 /* Send marker if required */
1184 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1187 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
1189 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1190 "qla_target(%d): %s failed: unable to allocate "
1191 "request packet", vha->vp_idx, __func__);
1195 resp->entry_type = ABTS_RESP_24XX;
1196 resp->entry_count = 1;
1197 resp->nport_handle = abts->nport_handle;
1198 resp->vp_index = vha->vp_idx;
1199 resp->sof_type = abts->sof_type;
1200 resp->exchange_address = abts->exchange_address;
1201 resp->fcp_hdr_le = abts->fcp_hdr_le;
1202 f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1203 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1204 F_CTL_SEQ_INITIATIVE);
1205 p = (uint8_t *)&f_ctl;
1206 resp->fcp_hdr_le.f_ctl[0] = *p++;
1207 resp->fcp_hdr_le.f_ctl[1] = *p++;
1208 resp->fcp_hdr_le.f_ctl[2] = *p;
1210 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
1211 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
1212 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1213 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1214 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1215 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1217 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1218 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1219 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1220 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1221 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1222 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1224 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1225 if (status == FCP_TMF_CMPL) {
1226 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1227 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1228 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1229 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1230 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1231 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1233 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1234 resp->payload.ba_rjt.reason_code =
1235 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1236 /* Other bytes are zero */
1239 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1241 /* Memory Barrier */
1243 qla2x00_start_iocbs(vha, vha->req);
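/*
 * Retry path driven by the firmware's completion of an ABTS response we
 * generated: terminate the exchange with a CTIO7 carrying
 * CTIO7_FLAGS_TERMINATE, then send the BA_ACC ABTS response again.
 */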
/* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire */
1249 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1250 struct abts_resp_from_24xx_fw *entry)
1252 struct ctio7_to_24xx *ctio;
1254 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1255 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
1256 /* Send marker if required */
1257 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1260 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
1262 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1263 "qla_target(%d): %s failed: unable to allocate "
1264 "request packet\n", vha->vp_idx, __func__);
        /*
         * On entry we have the firmware's response to the ABTS response we
         * generated earlier, so the ID fields in it are reversed.
         */
1273 ctio->entry_type = CTIO_TYPE7;
1274 ctio->entry_count = 1;
1275 ctio->nport_handle = entry->nport_handle;
1276 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1277 ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1278 ctio->vp_index = vha->vp_idx;
1279 ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1280 ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1281 ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1282 ctio->exchange_addr = entry->exchange_addr_to_abort;
1283 ctio->u.status1.flags =
1284 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1285 CTIO7_FLAGS_TERMINATE);
1286 ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
1288 /* Memory Barrier */
1290 qla2x00_start_iocbs(vha, vha->req);
1292 qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
1293 FCP_TMF_CMPL, true);
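/*
 * Look up a command by exchange address in the per-vha command lists; a
 * match on the active list is marked QLA_TGT_STATE_ABORTED.  The return
 * value tells the caller whether anything matching the ABTS tag was found.
 */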
1296 static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1298 struct qla_tgt_sess_op *op;
1299 struct qla_tgt_cmd *cmd;
1301 spin_lock(&vha->cmd_list_lock);
1303 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1304 if (tag == op->atio.u.isp24.exchange_addr) {
1306 spin_unlock(&vha->cmd_list_lock);
1311 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1312 if (tag == cmd->atio.u.isp24.exchange_addr) {
1313 cmd->state = QLA_TGT_STATE_ABORTED;
1314 spin_unlock(&vha->cmd_list_lock);
1319 spin_unlock(&vha->cmd_list_lock);
/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which the lun reset was received
 * XXX does not go through the list of other ports (which may have cmds
 *     for the same lun)
 */
1328 static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1329 uint32_t lun, uint8_t *s_id)
1331 struct qla_tgt_sess_op *op;
1332 struct qla_tgt_cmd *cmd;
1335 key = sid_to_key(s_id);
1336 spin_lock(&vha->cmd_list_lock);
1337 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1341 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1342 op_lun = scsilun_to_int(
1343 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1344 if (op_key == key && op_lun == lun)
1347 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1351 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1352 cmd_lun = scsilun_to_int(
1353 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1354 if (cmd_key == key && cmd_lun == lun)
1355 cmd->state = QLA_TGT_STATE_ABORTED;
1357 spin_unlock(&vha->cmd_list_lock);
1360 /* ha->hardware_lock supposed to be held on entry */
1361 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1362 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
1364 struct qla_hw_data *ha = vha->hw;
1365 struct se_session *se_sess = sess->se_sess;
1366 struct qla_tgt_mgmt_cmd *mcmd;
1367 struct se_cmd *se_cmd;
1370 bool found_lun = false;
1372 spin_lock(&se_sess->sess_cmd_lock);
1373 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1374 struct qla_tgt_cmd *cmd =
1375 container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
1376 if (se_cmd->tag == abts->exchange_addr_to_abort) {
1377 lun = cmd->unpacked_lun;
1382 spin_unlock(&se_sess->sess_cmd_lock);
1384 /* cmd not in LIO lists, look in qla list */
1386 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1387 /* send TASK_ABORT response immediately */
1388 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
1391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1392 "unable to find cmd in driver or LIO for tag 0x%x\n",
1393 abts->exchange_addr_to_abort);
1398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1399 "qla_target(%d): task abort (tag=%d)\n",
1400 vha->vp_idx, abts->exchange_addr_to_abort);
1402 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
1404 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
1405 "qla_target(%d): %s: Allocation of ABORT cmd failed",
1406 vha->vp_idx, __func__);
1409 memset(mcmd, 0, sizeof(*mcmd));
1412 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1413 mcmd->reset_count = vha->hw->chip_reset;
1415 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
1416 abts->exchange_addr_to_abort);
1418 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
1419 "qla_target(%d): tgt_ops->handle_tmr()"
1420 " failed: %d", vha->vp_idx, rc);
1421 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
/* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire */
1431 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1432 struct abts_recv_from_24xx *abts)
1434 struct qla_hw_data *ha = vha->hw;
1435 struct qla_tgt_sess *sess;
1436 uint32_t tag = abts->exchange_addr_to_abort;
1440 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1441 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
1442 "qla_target(%d): ABTS: Abort Sequence not "
1443 "supported\n", vha->vp_idx);
1444 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1448 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
1449 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
1450 "qla_target(%d): ABTS: Unknown Exchange "
1451 "Address received\n", vha->vp_idx);
1452 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1456 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
1457 "qla_target(%d): task abort (s_id=%x:%x:%x, "
1458 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
1459 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
1460 le32_to_cpu(abts->fcp_hdr_le.parameter));
1462 s_id[0] = abts->fcp_hdr_le.s_id[2];
1463 s_id[1] = abts->fcp_hdr_le.s_id[1];
1464 s_id[2] = abts->fcp_hdr_le.s_id[0];
1466 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
1468 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
1469 "qla_target(%d): task abort for non-existant session\n",
1471 rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
1472 QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
1474 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
1480 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1481 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1485 rc = __qlt_24xx_handle_abts(vha, abts, sess);
1487 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
1488 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
1490 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
/* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire */
1498 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1499 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
1501 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
1502 struct ctio7_to_24xx *ctio;
1505 ql_dbg(ql_dbg_tgt, ha, 0xe008,
1506 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1507 ha, atio, resp_code);
1509 /* Send marker if required */
1510 if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
1513 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
1515 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
1516 "qla_target(%d): %s failed: unable to allocate "
1517 "request packet\n", ha->vp_idx, __func__);
1521 ctio->entry_type = CTIO_TYPE7;
1522 ctio->entry_count = 1;
1523 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1524 ctio->nport_handle = mcmd->sess->loop_id;
1525 ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1526 ctio->vp_index = ha->vp_idx;
1527 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1528 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1529 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1530 ctio->exchange_addr = atio->u.isp24.exchange_addr;
1531 ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
1532 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1533 CTIO7_FLAGS_SEND_STATUS);
1534 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1535 ctio->u.status1.ox_id = cpu_to_le16(temp);
1536 ctio->u.status1.scsi_status =
1537 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1538 ctio->u.status1.response_len = __constant_cpu_to_le16(8);
1539 ctio->u.status1.sense_data[0] = resp_code;
1541 /* Memory Barrier */
1543 qla2x00_start_iocbs(ha, ha->req);
1546 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
1548 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1550 EXPORT_SYMBOL(qlt_free_mcmd);
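/*
 * Completion of a task-management request from the fabric module: stale
 * requests that predate a chip reset are dropped, otherwise the original
 * IOCB is answered with a NOTIFY ACK, an ABTS response or a task-mgmt CTIO
 * depending on how the TMF arrived, and the mcmd is freed.
 */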
1552 /* callback from target fabric module code */
1553 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1555 struct scsi_qla_host *vha = mcmd->sess->vha;
1556 struct qla_hw_data *ha = vha->hw;
1557 unsigned long flags;
1559 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
1560 "TM response mcmd (%p) status %#x state %#x",
1561 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
1563 spin_lock_irqsave(&ha->hardware_lock, flags);
1565 if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
1567 * Either a chip reset is active or this request was from
1568 * previous life, just abort the processing.
1570 ql_dbg(ql_dbg_async, vha, 0xe100,
1571 "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
1572 qla2x00_reset_active(vha), mcmd->reset_count,
1574 ha->tgt.tgt_ops->free_mcmd(mcmd);
1575 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1579 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
1580 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
1583 if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
1584 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
1585 mcmd->fc_tm_rsp, false);
1587 qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
1591 * Make the callback for ->free_mcmd() to queue_work() and invoke
1592 * target_put_sess_cmd() to drop cmd_kref to 1. The final
1593 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
1594 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
1595 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
1596 * qlt_xmit_tm_rsp() returns here..
1598 ha->tgt.tgt_ops->free_mcmd(mcmd);
1599 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1601 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
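/*
 * DMA-map the command's data (and protection) scatterlists and work out how
 * many request-queue entries the CTIO will need, including continuation
 * IOCBs once the segment count exceeds datasegs_per_cmd.
 */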
1604 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
1606 struct qla_tgt_cmd *cmd = prm->cmd;
1608 BUG_ON(cmd->sg_cnt == 0);
1610 prm->sg = (struct scatterlist *)cmd->sg;
1611 prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
1612 cmd->sg_cnt, cmd->dma_data_direction);
1613 if (unlikely(prm->seg_cnt == 0))
1616 prm->cmd->sg_mapped = 1;
1618 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
1620 * If greater than four sg entries then we need to allocate
1621 * the continuation entries
1623 if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
1624 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
1625 prm->tgt->datasegs_per_cmd,
1626 prm->tgt->datasegs_per_cont);
1629 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
1630 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
1631 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
1632 prm->tot_dsds = prm->seg_cnt;
1634 prm->tot_dsds = prm->seg_cnt;
1636 if (cmd->prot_sg_cnt) {
1637 prm->prot_sg = cmd->prot_sg;
1638 prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
1639 cmd->prot_sg, cmd->prot_sg_cnt,
1640 cmd->dma_data_direction);
1641 if (unlikely(prm->prot_seg_cnt == 0))
1644 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
1645 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
                /* DIF bundling is not supported here */
1647 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
1649 prm->tot_dsds += prm->prot_seg_cnt;
1651 prm->tot_dsds += prm->prot_seg_cnt;
1658 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
1659 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
1660 0, prm->cmd->sg_cnt);
1664 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
1666 struct qla_hw_data *ha = vha->hw;
1668 if (!cmd->sg_mapped)
1671 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1674 if (cmd->prot_sg_cnt)
1675 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
1676 cmd->dma_data_direction);
1678 if (cmd->ctx_dsd_alloced)
1679 qla2x00_clean_dsd_pool(ha, NULL, cmd);
1682 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
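/*
 * Recompute the free space on the request ring from the hardware in/out
 * pointers and reserve req_cnt (+2) entries for the CTIO about to be built;
 * logs and fails when the ring does not have enough room.
 */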
1685 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
1688 uint32_t cnt, cnt_in;
1690 if (vha->req->cnt < (req_cnt + 2)) {
1691 cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
1692 cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
1694 if (vha->req->ring_index < cnt)
1695 vha->req->cnt = cnt - vha->req->ring_index;
1697 vha->req->cnt = vha->req->length -
1698 (vha->req->ring_index - cnt);
1701 if (unlikely(vha->req->cnt < (req_cnt + 2))) {
1702 ql_dbg(ql_dbg_io, vha, 0x305a,
1703 "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
1704 vha->vp_idx, vha->req->ring_index,
1705 vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
1708 vha->req->cnt -= req_cnt;
/* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire */
1716 static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
1718 /* Adjust ring index. */
1719 vha->req->ring_index++;
1720 if (vha->req->ring_index == vha->req->length) {
1721 vha->req->ring_index = 0;
1722 vha->req->ring_ptr = vha->req->ring;
1724 vha->req->ring_ptr++;
1726 return (cont_entry_t *)vha->req->ring_ptr;
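/*
 * Allocate the next free command handle from ha->tgt.cmds[], skipping the
 * reserved NULL and SKIP handles and wrapping at DEFAULT_OUTSTANDING_COMMANDS;
 * returns QLA_TGT_NULL_HANDLE when every slot is in use.
 */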
1729 /* ha->hardware_lock supposed to be held on entry */
1730 static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
1732 struct qla_hw_data *ha = vha->hw;
1735 h = ha->tgt.current_handle;
1736 /* always increment cmd handle */
1739 if (h > DEFAULT_OUTSTANDING_COMMANDS)
1740 h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
1741 if (h == ha->tgt.current_handle) {
1742 ql_dbg(ql_dbg_io, vha, 0x305b,
1743 "qla_target(%d): Ran out of "
1744 "empty cmd slots in ha %p\n", vha->vp_idx, ha);
1745 h = QLA_TGT_NULL_HANDLE;
1748 } while ((h == QLA_TGT_NULL_HANDLE) ||
1749 (h == QLA_TGT_SKIP_HANDLE) ||
1750 (ha->tgt.cmds[h-1] != NULL));
1752 if (h != QLA_TGT_NULL_HANDLE)
1753 ha->tgt.current_handle = h;
1758 /* ha->hardware_lock supposed to be held on entry */
1759 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1760 struct scsi_qla_host *vha)
1763 struct ctio7_to_24xx *pkt;
1764 struct qla_hw_data *ha = vha->hw;
1765 struct atio_from_isp *atio = &prm->cmd->atio;
1768 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
1770 memset(pkt, 0, sizeof(*pkt));
1772 pkt->entry_type = CTIO_TYPE7;
1773 pkt->entry_count = (uint8_t)prm->req_cnt;
1774 pkt->vp_index = vha->vp_idx;
1776 h = qlt_make_handle(vha);
1777 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
1779 * CTIO type 7 from the firmware doesn't provide a way to
1780 * know the initiator's LOOP ID, hence we can't find
1781 * the session and, so, the command.
1785 ha->tgt.cmds[h-1] = prm->cmd;
1787 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
1788 pkt->nport_handle = prm->cmd->loop_id;
1789 pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1790 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1791 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1792 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1793 pkt->exchange_addr = atio->u.isp24.exchange_addr;
1794 pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
1795 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1796 pkt->u.status0.ox_id = cpu_to_le16(temp);
1797 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient amount of request entries to not drop it.
 */
1806 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
1807 struct scsi_qla_host *vha)
1810 uint32_t *dword_ptr;
1811 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1813 /* Build continuation packets */
1814 while (prm->seg_cnt > 0) {
1815 cont_a64_entry_t *cont_pkt64 =
1816 (cont_a64_entry_t *)qlt_get_req_pkt(vha);
1819 * Make sure that from cont_pkt64 none of
1820 * 64-bit specific fields used for 32-bit
1821 * addressing. Cast to (cont_entry_t *) for
1825 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
1827 cont_pkt64->entry_count = 1;
1828 cont_pkt64->sys_define = 0;
1830 if (enable_64bit_addressing) {
1831 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
1833 (uint32_t *)&cont_pkt64->dseg_0_address;
1835 cont_pkt64->entry_type = CONTINUE_TYPE;
1837 (uint32_t *)&((cont_entry_t *)
1838 cont_pkt64)->dseg_0_address;
1841 /* Load continuation entry data segments */
1843 cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
1844 cnt++, prm->seg_cnt--) {
1846 cpu_to_le32(pci_dma_lo32
1847 (sg_dma_address(prm->sg)));
1848 if (enable_64bit_addressing) {
1850 cpu_to_le32(pci_dma_hi32
1854 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1856 prm->sg = sg_next(prm->sg);
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient amount of request entries to not drop it.
 */
1865 static void qlt_load_data_segments(struct qla_tgt_prm *prm,
1866 struct scsi_qla_host *vha)
1869 uint32_t *dword_ptr;
1870 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1871 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
1873 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
1875 /* Setup packet address segment pointer */
1876 dword_ptr = pkt24->u.status0.dseg_0_address;
1878 /* Set total data segment count */
1880 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
1882 if (prm->seg_cnt == 0) {
1883 /* No data transfer */
1889 /* If scatter gather */
1891 /* Load command entry data segments */
1893 (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1894 cnt++, prm->seg_cnt--) {
1896 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1897 if (enable_64bit_addressing) {
1899 cpu_to_le32(pci_dma_hi32(
1900 sg_dma_address(prm->sg)));
1902 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1904 prm->sg = sg_next(prm->sg);
1907 qlt_load_cont_data_segments(prm, vha);
1910 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
1912 return cmd->bufflen > 0;
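/*
 * First half of sending a response: terminate the exchange for commands
 * already aborted, map the data for DMA, compute how many ring entries the
 * CTIO needs and fold residual under-/overflow into the SCSI status that
 * will be reported.
 */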
/* Called without ha->hardware_lock held */
1918 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1919 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
1920 uint32_t *full_req_cnt)
1922 struct qla_tgt *tgt = cmd->tgt;
1923 struct scsi_qla_host *vha = tgt->vha;
1924 struct qla_hw_data *ha = vha->hw;
1925 struct se_cmd *se_cmd = &cmd->se_cmd;
1927 if (unlikely(cmd->aborted)) {
1928 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1929 "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
1930 vha->vp_idx, cmd, se_cmd, se_cmd->tag);
1932 cmd->state = QLA_TGT_STATE_ABORTED;
1933 cmd->cmd_flags |= BIT_6;
1935 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1937 /* !! At this point cmd could be already freed !! */
1938 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1943 prm->rq_result = scsi_status;
1944 prm->sense_buffer = &cmd->sense_buffer[0];
1945 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
1949 prm->add_status_pkt = 0;
1951 /* Send marker if required */
1952 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
1955 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
1956 if (qlt_pci_map_calc_cnt(prm) != 0)
1960 *full_req_cnt = prm->req_cnt;
1962 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1963 prm->residual = se_cmd->residual_count;
1964 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
1965 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
1966 prm->residual, se_cmd->tag,
1967 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
1968 cmd->bufflen, prm->rq_result);
1969 prm->rq_result |= SS_RESIDUAL_UNDER;
1970 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1971 prm->residual = se_cmd->residual_count;
1972 ql_dbg(ql_dbg_io, vha, 0x305d,
1973 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
1974 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
1975 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
1976 prm->rq_result |= SS_RESIDUAL_OVER;
1979 if (xmit_type & QLA_TGT_XMIT_STATUS) {
1981 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
1982 * ignored in *xmit_response() below
1984 if (qlt_has_data(cmd)) {
1985 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
1986 (IS_FWI2_CAPABLE(ha) &&
1987 (prm->rq_result != 0))) {
1988 prm->add_status_pkt = 1;
1997 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
1998 struct qla_tgt_cmd *cmd, int sending_sense)
2000 if (ha->tgt.enable_class_2)
2004 return cmd->conf_compl_supported;
2006 return ha->tgt.enable_explicit_conf &&
2007 cmd->conf_compl_supported;
2010 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
/* Original taken from the XFS code */
2014 static unsigned long qlt_srr_random(void)
2017 static unsigned long RandomValue;
2018 static DEFINE_SPINLOCK(lock);
2019 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
2023 unsigned long flags;
2025 spin_lock_irqsave(&lock, flags);
2027 RandomValue = jiffies;
2033 rv = 16807 * lo - 2836 * hi;
2037 spin_unlock_irqrestore(&lock, flags);
2041 static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
#if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
2044 if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
2046 *xmit_type &= ~QLA_TGT_XMIT_STATUS;
2047 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
2048 "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
2052 * It's currently not possible to simulate SRRs for FCP_WRITE without
2053 * a physical link layer failure, so don't even try here..
2055 if (cmd->dma_data_direction != DMA_FROM_DEVICE)
2058 if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
2059 ((qlt_srr_random() % 100) == 20)) {
2061 unsigned int tot_len = 0;
2064 leave = qlt_srr_random() % cmd->sg_cnt;
2066 for (i = 0; i < leave; i++)
2067 tot_len += cmd->sg[i].length;
2069 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
2070 "Cutting cmd %p (tag %d) buffer"
2071 " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
2072 " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
2073 cmd->bufflen, cmd->sg_cnt);
2075 cmd->bufflen = tot_len;
2076 cmd->sg_cnt = leave;
2079 if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
2080 unsigned int offset = qlt_srr_random() % cmd->bufflen;
2082 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
2083 "Cutting cmd %p (tag %d) buffer head "
2084 "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
2087 *xmit_type &= ~QLA_TGT_XMIT_DATA;
2088 else if (qlt_set_data_offset(cmd, offset)) {
2089 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
2090 "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
2095 static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
2099 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2100 struct qla_tgt_prm *prm)
2102 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2103 (uint32_t)sizeof(ctio->u.status1.sense_data));
2104 ctio->u.status0.flags |=
2105 __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2106 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
2107 ctio->u.status0.flags |= __constant_cpu_to_le16(
2108 CTIO7_FLAGS_EXPLICIT_CONFORM |
2109 CTIO7_FLAGS_CONFORM_REQ);
2111 ctio->u.status0.residual = cpu_to_le32(prm->residual);
2112 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2113 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2116 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
2117 if (prm->cmd->se_cmd.scsi_status != 0) {
2118 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
2119 "Skipping EXPLICIT_CONFORM and "
2120 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2121 "non GOOD status\n");
2122 goto skip_explict_conf;
2124 ctio->u.status1.flags |= __constant_cpu_to_le16(
2125 CTIO7_FLAGS_EXPLICIT_CONFORM |
2126 CTIO7_FLAGS_CONFORM_REQ);
2129 ctio->u.status1.flags &=
2130 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2131 ctio->u.status1.flags |=
2132 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2133 ctio->u.status1.scsi_status |=
2134 __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
2135 ctio->u.status1.sense_length =
2136 cpu_to_le16(prm->sense_buffer_len);
2137 for (i = 0; i < prm->sense_buffer_len/4; i++)
2138 ((uint32_t *)ctio->u.status1.sense_data)[i] =
2139 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2141 if (unlikely((prm->sense_buffer_len % 4) != 0)) {
2144 ql_dbg(ql_dbg_tgt, vha, 0xe04f,
2145 "qla_target(%d): %d bytes of sense "
2146 "lost", prm->tgt->ha->vp_idx,
2147 prm->sense_buffer_len % 4);
2153 ctio->u.status1.flags &=
2154 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2155 ctio->u.status1.flags |=
2156 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2157 ctio->u.status1.sense_length = 0;
2158 memset(ctio->u.status1.sense_data, 0,
2159 sizeof(ctio->u.status1.sense_data));
2162 /* Sense with len > 24: is that even possible? */
2169 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2172 * Uncomment when corresponding SCSI changes are done.
2174 if (!sp->cmd->prot_chk)
2178 switch (se_cmd->prot_op) {
2179 case TARGET_PROT_DOUT_INSERT:
2180 case TARGET_PROT_DIN_STRIP:
2181 if (ql2xenablehba_err_chk >= 1)
2184 case TARGET_PROT_DOUT_PASS:
2185 case TARGET_PROT_DIN_PASS:
2186 if (ql2xenablehba_err_chk >= 2)
2189 case TARGET_PROT_DIN_INSERT:
2190 case TARGET_PROT_DOUT_STRIP:
2199 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
2203 qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
2205 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2207 /* wait until Mode Sense/Select cmd, modepage 0xA, subpage 2
2208 * have been implemented by TCM, before AppTag is available.
2209 * Look for modesense_handlers[]
2212 ctx->app_tag_mask[0] = 0x0;
2213 ctx->app_tag_mask[1] = 0x0;
2215 switch (se_cmd->prot_type) {
2216 case TARGET_DIF_TYPE0_PROT:
2218 * No check for ql2xenablehba_err_chk, as it would be an
2219 * I/O error if hba tag generation is not done.
2221 ctx->ref_tag = cpu_to_le32(lba);
2223 if (!qlt_hba_err_chk_enabled(se_cmd))
2226 /* enable ALL bytes of the ref tag */
2227 ctx->ref_tag_mask[0] = 0xff;
2228 ctx->ref_tag_mask[1] = 0xff;
2229 ctx->ref_tag_mask[2] = 0xff;
2230 ctx->ref_tag_mask[3] = 0xff;
2233 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
2236 case TARGET_DIF_TYPE1_PROT:
2237 ctx->ref_tag = cpu_to_le32(lba);
2239 if (!qlt_hba_err_chk_enabled(se_cmd))
2242 /* enable ALL bytes of the ref tag */
2243 ctx->ref_tag_mask[0] = 0xff;
2244 ctx->ref_tag_mask[1] = 0xff;
2245 ctx->ref_tag_mask[2] = 0xff;
2246 ctx->ref_tag_mask[3] = 0xff;
2249 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
2250 * match LBA in CDB + N
2252 case TARGET_DIF_TYPE2_PROT:
2253 ctx->ref_tag = cpu_to_le32(lba);
2255 if (!qlt_hba_err_chk_enabled(se_cmd))
2258 /* enable ALL bytes of the ref tag */
2259 ctx->ref_tag_mask[0] = 0xff;
2260 ctx->ref_tag_mask[1] = 0xff;
2261 ctx->ref_tag_mask[2] = 0xff;
2262 ctx->ref_tag_mask[3] = 0xff;
2265 /* For Type 3 protection: 16 bit GUARD only */
2266 case TARGET_DIF_TYPE3_PROT:
2267 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2268 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
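/*
 * Background (general T10 DIF layout, not driver-specific): each protected
 * block carries an 8-byte tuple - a 16-bit guard (CRC of the data block),
 * a 16-bit application tag and a 32-bit reference tag. For Type 1/2 the
 * reference tag is seeded from the low 32 bits of the LBA, which is why
 * ctx->ref_tag is loaded with "lba" above; the ref_tag_mask/app_tag_mask
 * bytes tell the firmware which tag bytes to actually validate.
 */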
2275 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2279 uint32_t transfer_length = 0;
2280 uint32_t data_bytes;
2282 uint8_t bundling = 1;
2284 struct crc_context *crc_ctx_pkt = NULL;
2285 struct qla_hw_data *ha;
2286 struct ctio_crc2_to_fw *pkt;
2287 dma_addr_t crc_ctx_dma;
2288 uint16_t fw_prot_opts = 0;
2289 struct qla_tgt_cmd *cmd = prm->cmd;
2290 struct se_cmd *se_cmd = &cmd->se_cmd;
2292 struct atio_from_isp *atio = &prm->cmd->atio;
2298 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
2300 memset(pkt, 0, sizeof(*pkt));
2302 ql_dbg(ql_dbg_tgt, vha, 0xe071,
2303 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
2304 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
2305 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
2307 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
2308 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
2311 /* Compute dif len and adjust data len to include protection */
2312 data_bytes = cmd->bufflen;
2313 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
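/*
 * dif_bytes is 8 bytes of protection information per logical block.
 * Illustrative numbers only: a 128 KiB buffer with cmd->blk_sz == 512
 * gives 256 blocks, i.e. dif_bytes = 256 * 8 = 2048, and the cases below
 * decide whether those bytes travel on the wire, sit in host memory, or both.
 */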
2315 switch (se_cmd->prot_op) {
2316 case TARGET_PROT_DIN_INSERT:
2317 case TARGET_PROT_DOUT_STRIP:
2318 transfer_length = data_bytes;
2319 data_bytes += dif_bytes;
2322 case TARGET_PROT_DIN_STRIP:
2323 case TARGET_PROT_DOUT_INSERT:
2324 case TARGET_PROT_DIN_PASS:
2325 case TARGET_PROT_DOUT_PASS:
2326 transfer_length = data_bytes + dif_bytes;
2334 if (!qlt_hba_err_chk_enabled(se_cmd))
2335 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
2336 /* HBA error checking enabled */
2337 else if (IS_PI_UNINIT_CAPABLE(ha)) {
2338 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2339 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2340 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
2341 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2342 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2345 switch (se_cmd->prot_op) {
2346 case TARGET_PROT_DIN_INSERT:
2347 case TARGET_PROT_DOUT_INSERT:
2348 fw_prot_opts |= PO_MODE_DIF_INSERT;
2350 case TARGET_PROT_DIN_STRIP:
2351 case TARGET_PROT_DOUT_STRIP:
2352 fw_prot_opts |= PO_MODE_DIF_REMOVE;
2354 case TARGET_PROT_DIN_PASS:
2355 case TARGET_PROT_DOUT_PASS:
2356 fw_prot_opts |= PO_MODE_DIF_PASS;
2357 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
2359 default:/* Normal Request */
2360 fw_prot_opts |= PO_MODE_DIF_PASS;
2366 /* Update entry type to indicate Command Type CRC_2 IOCB */
2367 pkt->entry_type = CTIO_CRC2;
2368 pkt->entry_count = 1;
2369 pkt->vp_index = vha->vp_idx;
2371 h = qlt_make_handle(vha);
2372 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2374 * CTIO type 7 from the firmware doesn't provide a way to
2375 * know the initiator's LOOP ID, hence we can't find
2376 * the session and, so, the command.
2380 ha->tgt.cmds[h-1] = prm->cmd;
2383 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
2384 pkt->nport_handle = prm->cmd->loop_id;
2385 pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2386 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2387 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2388 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2389 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2391 /* silence compile warning */
2392 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2393 pkt->ox_id = cpu_to_le16(t16);
2395 t16 = (atio->u.isp24.attr << 9);
2396 pkt->flags |= cpu_to_le16(t16);
2397 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2399 /* Set transfer direction */
2400 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2401 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2402 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2403 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2406 pkt->dseg_count = prm->tot_dsds;
2407 /* Fibre channel byte count */
2408 pkt->transfer_length = cpu_to_le32(transfer_length);
2411 /* ----- CRC context -------- */
2413 /* Allocate CRC context from global pool */
2414 crc_ctx_pkt = cmd->ctx =
2415 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2418 goto crc_queuing_error;
2420 /* Zero out CTX area. */
2421 clr_ptr = (uint8_t *)crc_ctx_pkt;
2422 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
2424 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
2425 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
2428 crc_ctx_pkt->handle = pkt->handle;
2430 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
2432 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2433 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2434 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2438 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2441 * Configure bundling if we need to fetch the interleaved
2442 * protection data with separate PCI accesses
2444 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
2445 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
2446 crc_ctx_pkt->u.bundling.dseg_count =
2447 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
2448 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
2451 /* Finish the common fields of CRC pkt */
2452 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
2453 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
2454 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2455 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
2458 /* Walks data segments */
2459 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2461 if (!bundling && prm->prot_seg_cnt) {
2462 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2463 prm->tot_dsds, cmd))
2464 goto crc_queuing_error;
2465 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2466 (prm->tot_dsds - prm->prot_seg_cnt), cmd))
2467 goto crc_queuing_error;
2469 if (bundling && prm->prot_seg_cnt) {
2470 /* Walks dif segments */
2471 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2473 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2474 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2475 prm->prot_seg_cnt, cmd))
2476 goto crc_queuing_error;
2481 /* Cleanup will be performed by the caller */
2483 return QLA_FUNCTION_FAILED;
2488 * Callback to set up the response for xmit_type of QLA_TGT_XMIT_DATA and
2489 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
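 *
 * Rough flow (a summary derived from the code below, not a normative
 * description): bail out early if the session is being deleted, map the
 * SG list and work out the residual/sense state, reserve enough request-ring
 * entries, build a plain CTIO7 or a CTIO CRC2 for protected transfers, and,
 * when data + status + sense must go out together, append a second
 * "status only" CTIO before kicking the request ring.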
2491 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2492 uint8_t scsi_status)
2494 struct scsi_qla_host *vha = cmd->vha;
2495 struct qla_hw_data *ha = vha->hw;
2496 struct ctio7_to_24xx *pkt;
2497 struct qla_tgt_prm prm;
2498 uint32_t full_req_cnt = 0;
2499 unsigned long flags = 0;
2502 spin_lock_irqsave(&ha->hardware_lock, flags);
2503 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2504 cmd->state = QLA_TGT_STATE_PROCESSED;
2505 if (cmd->sess->logout_completed)
2506 /* no need to terminate. FW already freed exchange. */
2507 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2509 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2510 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2513 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2515 memset(&prm, 0, sizeof(prm));
2516 qlt_check_srr_debug(cmd, &xmit_type);
2518 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2519 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
2520 (xmit_type & QLA_TGT_XMIT_STATUS) ?
2521 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
2524 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2526 if (unlikely(res != 0)) {
2527 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2533 spin_lock_irqsave(&ha->hardware_lock, flags);
2535 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2537 * Either a chip reset is active or this request was from
2538 * previous life, just abort the processing.
2540 cmd->state = QLA_TGT_STATE_PROCESSED;
2541 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2542 ql_dbg(ql_dbg_async, vha, 0xe101,
2543 "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
2544 qla2x00_reset_active(vha), cmd->reset_count,
2546 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2550 /* Does F/W have IOCBs for this request? */
2551 res = qlt_check_reserve_free_req(vha, full_req_cnt);
2553 goto out_unmap_unlock;
2555 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
2556 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2558 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2559 if (unlikely(res != 0)) {
2560 vha->req->cnt += full_req_cnt;
2561 goto out_unmap_unlock;
2564 pkt = (struct ctio7_to_24xx *)prm.pkt;
2566 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2567 pkt->u.status0.flags |=
2568 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2569 CTIO7_FLAGS_STATUS_MODE_0);
2571 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2572 qlt_load_data_segments(&prm, vha);
2574 if (prm.add_status_pkt == 0) {
2575 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2576 pkt->u.status0.scsi_status =
2577 cpu_to_le16(prm.rq_result);
2578 pkt->u.status0.residual =
2579 cpu_to_le32(prm.residual);
2580 pkt->u.status0.flags |= __constant_cpu_to_le16(
2581 CTIO7_FLAGS_SEND_STATUS);
2582 if (qlt_need_explicit_conf(ha, cmd, 0)) {
2583 pkt->u.status0.flags |=
2584 __constant_cpu_to_le16(
2585 CTIO7_FLAGS_EXPLICIT_CONFORM |
2586 CTIO7_FLAGS_CONFORM_REQ);
2592 * We have already made sure that there are enough
2593 * request entries, so qlt_get_req_pkt() below won't drop the HW lock.
2596 struct ctio7_to_24xx *ctio =
2597 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2599 ql_dbg(ql_dbg_io, vha, 0x305e,
2600 "Building additional status packet 0x%p.\n",
2604 * T10Dif: ctio_crc2_to_fw overlays on top of the CTIO7 frame (buffer).
2607 memcpy(ctio, pkt, sizeof(*ctio));
2608 /* reset back to CTIO7 */
2609 ctio->entry_count = 1;
2610 ctio->entry_type = CTIO_TYPE7;
2611 ctio->dseg_count = 0;
2612 ctio->u.status1.flags &= ~__constant_cpu_to_le16(
2613 CTIO7_FLAGS_DATA_IN);
2615 /* Real finish is ctio_m1's finish */
2616 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2617 pkt->u.status0.flags |= __constant_cpu_to_le16(
2618 CTIO7_FLAGS_DONT_RET_CTIO);
2620 /* qlt_24xx_init_ctio_to_isp will correct
2621 * all necessary fields that are part of CTIO7.
2622 * There should be no residual of CTIO-CRC2 data.
2624 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2626 pr_debug("Status CTIO7: %p\n", ctio);
2629 qlt_24xx_init_ctio_to_isp(pkt, &prm);
2632 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2633 cmd->cmd_sent_to_fw = 1;
2635 /* Memory Barrier */
2637 qla2x00_start_iocbs(vha, vha->req);
2638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2643 qlt_unmap_sg(vha, cmd);
2644 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2648 EXPORT_SYMBOL(qlt_xmit_response);
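/*
 * qlt_rdy_to_xfer() is the write-data counterpart of qlt_xmit_response():
 * in the tcm_qla2xxx flow it is invoked from the fabric write_pending path
 * and builds a DATA_OUT CTIO so the firmware solicits the write data from
 * the initiator; once the data has landed, qlt_do_ctio_completion() calls
 * tgt_ops->handle_data() to continue command processing.
 */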
2650 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2652 struct ctio7_to_24xx *pkt;
2653 struct scsi_qla_host *vha = cmd->vha;
2654 struct qla_hw_data *ha = vha->hw;
2655 struct qla_tgt *tgt = cmd->tgt;
2656 struct qla_tgt_prm prm;
2657 unsigned long flags;
2660 memset(&prm, 0, sizeof(prm));
2666 /* Send marker if required */
2667 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2670 /* Calculate number of entries and segments required */
2671 if (qlt_pci_map_calc_cnt(&prm) != 0)
2674 spin_lock_irqsave(&ha->hardware_lock, flags);
2676 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
2677 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2679 * Either a chip reset is active or this request was from
2680 * previous life, just abort the processing.
2682 cmd->state = QLA_TGT_STATE_NEED_DATA;
2683 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2684 ql_dbg(ql_dbg_async, vha, 0xe102,
2685 "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
2686 qla2x00_reset_active(vha), cmd->reset_count,
2688 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2692 /* Does F/W have IOCBs for this request? */
2693 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2695 goto out_unlock_free_unmap;
2696 if (cmd->se_cmd.prot_op)
2697 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2699 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2701 if (unlikely(res != 0)) {
2702 vha->req->cnt += prm.req_cnt;
2703 goto out_unlock_free_unmap;
2706 pkt = (struct ctio7_to_24xx *)prm.pkt;
2707 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2708 CTIO7_FLAGS_STATUS_MODE_0);
2710 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2711 qlt_load_data_segments(&prm, vha);
2713 cmd->state = QLA_TGT_STATE_NEED_DATA;
2714 cmd->cmd_sent_to_fw = 1;
2716 /* Memory Barrier */
2718 qla2x00_start_iocbs(vha, vha->req);
2719 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2723 out_unlock_free_unmap:
2724 qlt_unmap_sg(vha, cmd);
2725 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2729 EXPORT_SYMBOL(qlt_rdy_to_xfer);
2733 * Checks the guard or meta-data for the type of error
2734 * detected by the HBA.
2737 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2738 struct ctio_crc_from_fw *sts)
2740 uint8_t *ap = &sts->actual_dif[0];
2741 uint8_t *ep = &sts->expected_dif[0];
2742 uint32_t e_ref_tag, a_ref_tag;
2743 uint16_t e_app_tag, a_app_tag;
2744 uint16_t e_guard, a_guard;
2745 uint64_t lba = cmd->se_cmd.t_task_lba;
2747 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
2748 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
2749 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
2751 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
2752 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
2753 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
2755 ql_dbg(ql_dbg_tgt, vha, 0xe075,
2756 "iocb(s) %p Returned STATUS.\n", sts);
2758 ql_dbg(ql_dbg_tgt, vha, 0xf075,
2759 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2760 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2761 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2765 * For type 3: ref & app tag is all 'f's
2766 * For type 0,1,2: app tag is all 'f's
2768 if ((a_app_tag == 0xffff) &&
2769 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
2770 (a_ref_tag == 0xffffffff))) {
2771 uint32_t blocks_done;
2773 /* 2TB boundary case covered automatically with this */
2774 blocks_done = e_ref_tag - (uint32_t)lba + 1;
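/*
 * Illustrative example: if the command started at LBA 0x1000 and the
 * expected reference tag where the firmware stopped is 0x100f, then
 * blocks_done = 0x100f - 0x1000 + 1 = 16, i.e. the escape was reported
 * on the 16th block of the transfer.
 */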
2775 cmd->se_cmd.bad_sector = e_ref_tag;
2776 cmd->se_cmd.pi_err = 0;
2777 ql_dbg(ql_dbg_tgt, vha, 0xf074,
2778 "need to return scsi good\n");
2780 /* Update protection tag */
2781 if (cmd->prot_sg_cnt) {
2782 uint32_t i, j = 0, k = 0, num_ent;
2783 struct scatterlist *sg, *sgl;
2788 /* Patch the corresponding protection tags */
2789 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
2790 num_ent = sg_dma_len(sg) / 8;
2791 if (k + num_ent < blocks_done) {
2795 j = blocks_done - k - 1;
2800 if (k != blocks_done) {
2801 ql_log(ql_log_warn, vha, 0xf076,
2802 "unexpected tag values tag:lba=%u:%llu)\n",
2803 e_ref_tag, (unsigned long long)lba);
2808 struct sd_dif_tuple *spt;
2810 * This section came from initiator. Is it valid here?
2811 * should the ULP value be overridden with the actual value?
2813 spt = page_address(sg_page(sg)) + sg->offset;
2816 spt->app_tag = 0xffff;
2817 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
2818 spt->ref_tag = 0xffffffff;
2826 if (e_guard != a_guard) {
2827 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
2828 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2830 ql_log(ql_log_warn, vha, 0xe076,
2831 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2832 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2833 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2834 a_guard, e_guard, cmd);
2839 if (e_ref_tag != a_ref_tag) {
2840 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
2841 cmd->se_cmd.bad_sector = e_ref_tag;
2843 ql_log(ql_log_warn, vha, 0xe077,
2844 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2845 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2846 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2847 a_guard, e_guard, cmd);
2851 /* check appl tag */
2852 if (e_app_tag != a_app_tag) {
2853 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
2854 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2856 ql_log(ql_log_warn, vha, 0xe078,
2857 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2858 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2859 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2860 a_guard, e_guard, cmd);
2868 /* If hardware_lock held on entry, might drop it, then reacquire */
2869 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2870 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2871 struct imm_ntfy_from_isp *ntfy)
2873 struct nack_to_isp *nack;
2874 struct qla_hw_data *ha = vha->hw;
2878 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
2879 "Sending TERM ELS CTIO (ha=%p)\n", ha);
2881 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2883 ql_dbg(ql_dbg_tgt, vha, 0xe080,
2884 "qla_target(%d): %s failed: unable to allocate "
2885 "request packet\n", vha->vp_idx, __func__);
2889 pkt->entry_type = NOTIFY_ACK_TYPE;
2890 pkt->entry_count = 1;
2891 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2893 nack = (struct nack_to_isp *)pkt;
2894 nack->ox_id = ntfy->ox_id;
2896 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
2897 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
2898 nack->u.isp24.flags = ntfy->u.isp24.flags &
2899 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
2903 nack->u.isp24.flags |=
2904 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
2906 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
2907 nack->u.isp24.status = ntfy->u.isp24.status;
2908 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
2909 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
2910 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
2911 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
2912 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
2913 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
2915 qla2x00_start_iocbs(vha, vha->req);
2919 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2920 struct imm_ntfy_from_isp *imm, int ha_locked)
2922 unsigned long flags = 0;
2925 if (qlt_issue_marker(vha, ha_locked) < 0)
2929 rc = __qlt_send_term_imm_notif(vha, imm);
2933 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2938 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2939 rc = __qlt_send_term_imm_notif(vha, imm);
2943 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2948 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2951 /* If hardware_lock held on entry, might drop it, then reacquire */
2952 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2953 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2954 struct qla_tgt_cmd *cmd,
2955 struct atio_from_isp *atio)
2957 struct ctio7_to_24xx *ctio24;
2958 struct qla_hw_data *ha = vha->hw;
2963 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2965 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2967 ql_dbg(ql_dbg_tgt, vha, 0xe050,
2968 "qla_target(%d): %s failed: unable to allocate "
2969 "request packet\n", vha->vp_idx, __func__);
2974 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
2975 ql_dbg(ql_dbg_tgt, vha, 0xe051,
2976 "qla_target(%d): Terminating cmd %p with "
2977 "incorrect state %d\n", vha->vp_idx, cmd,
2983 pkt->entry_count = 1;
2984 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2986 ctio24 = (struct ctio7_to_24xx *)pkt;
2987 ctio24->entry_type = CTIO_TYPE7;
2988 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
2989 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2990 ctio24->vp_index = vha->vp_idx;
2991 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2992 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2993 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2994 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
2995 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
2996 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
2997 CTIO7_FLAGS_TERMINATE);
2998 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2999 ctio24->u.status1.ox_id = cpu_to_le16(temp);
3001 /* Most likely, it isn't needed */
3002 ctio24->u.status1.residual = get_unaligned((uint32_t *)
3003 &atio->u.isp24.fcp_cmnd.add_cdb[
3004 atio->u.isp24.fcp_cmnd.add_cdb_len]);
3005 if (ctio24->u.status1.residual != 0)
3006 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3008 /* Memory Barrier */
3010 qla2x00_start_iocbs(vha, vha->req);
3014 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3015 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
3017 unsigned long flags = 0;
3020 if (qlt_issue_marker(vha, ha_locked) < 0)
3024 rc = __qlt_send_term_exchange(vha, cmd, atio);
3026 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3029 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3030 rc = __qlt_send_term_exchange(vha, cmd, atio);
3032 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3035 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
3036 !cmd->cmd_sent_to_fw)) {
3038 qlt_unmap_sg(vha, cmd);
3039 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3043 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3048 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3050 struct list_head free_list;
3051 struct qla_tgt_cmd *cmd, *tcmd;
3053 vha->hw->tgt.leak_exchg_thresh_hold =
3054 (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
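/*
 * Worked example (illustrative values only): with fw_xcb_count == 2048 and
 * LEAK_EXCHG_THRESH_HOLD_PERCENT == 75, the threshold becomes
 * (2048 / 100) * 75 = 20 * 75 = 1500 leaked exchanges before
 * qlt_chk_exch_leak_thresh_hold() below requests a chip reset.
 */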
3057 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3058 INIT_LIST_HEAD(&free_list);
3059 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3061 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3062 list_del(&cmd->cmd_list);
3063 /* This cmd was never sent to TCM. There is no need
3064 * to schedule free or call free_cmd
3067 vha->hw->tgt.num_qfull_cmds_alloc--;
3070 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3073 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3075 uint32_t total_leaked;
3077 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3079 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3080 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3082 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3083 "Chip reset due to exchange starvation: %d/%d.\n",
3084 total_leaked, vha->hw->fw_xcb_count);
3086 if (IS_P3P_TYPE(vha->hw))
3087 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3089 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3090 qla2xxx_wake_dpc(vha);
3095 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3097 struct qla_tgt_sess *sess = cmd->sess;
3099 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3100 "%s: se_cmd[%p] ox_id %04x\n",
3101 __func__, &cmd->se_cmd,
3102 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3104 BUG_ON(cmd->cmd_in_wq);
3107 qlt_decr_num_pend_cmds(cmd->vha);
3109 BUG_ON(cmd->sg_mapped);
3110 cmd->jiffies_at_free = get_jiffies_64();
3111 if (unlikely(cmd->free_sg))
3114 if (!sess || !sess->se_sess) {
3118 cmd->jiffies_at_free = get_jiffies_64();
3119 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3121 EXPORT_SYMBOL(qlt_free_cmd);
3123 /* ha->hardware_lock supposed to be held on entry */
3124 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
3125 struct qla_tgt_cmd *cmd, void *ctio)
3127 struct qla_tgt_srr_ctio *sc;
3128 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3129 struct qla_tgt_srr_imm *imm;
3132 cmd->cmd_flags |= BIT_15;
3134 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
3135 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
3138 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
3139 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
3144 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
3147 /* IRQ is already OFF */
3148 spin_lock(&tgt->srr_lock);
3149 sc->srr_id = tgt->ctio_srr_id;
3150 list_add_tail(&sc->srr_list_entry,
3151 &tgt->srr_ctio_list);
3152 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
3153 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
3154 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
3156 list_for_each_entry(imm, &tgt->srr_imm_list,
3158 if (imm->srr_id == sc->srr_id) {
3164 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
3165 "Scheduling srr work\n");
3166 schedule_work(&tgt->srr_work);
3168 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
3169 "qla_target(%d): imm_srr_id "
3170 "== ctio_srr_id (%d), but there is no "
3171 "corresponding SRR IMM, deleting CTIO "
3172 "SRR %p\n", vha->vp_idx,
3173 tgt->ctio_srr_id, sc);
3174 list_del(&sc->srr_list_entry);
3175 spin_unlock(&tgt->srr_lock);
3181 spin_unlock(&tgt->srr_lock);
3183 struct qla_tgt_srr_imm *ti;
3185 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
3186 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
3188 spin_lock(&tgt->srr_lock);
3189 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
3191 if (imm->srr_id == tgt->ctio_srr_id) {
3192 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
3193 "IMM SRR %p deleted (id %d)\n",
3195 list_del(&imm->srr_list_entry);
3196 qlt_reject_free_srr_imm(vha, imm, 1);
3199 spin_unlock(&tgt->srr_lock);
3208 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3210 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
3211 struct qla_tgt_cmd *cmd, uint32_t status)
3216 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3218 __constant_cpu_to_le16(OF_TERM_EXCH));
3223 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
3228 /* ha->hardware_lock supposed to be held on entry */
3229 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
3232 struct qla_hw_data *ha = vha->hw;
3235 if (ha->tgt.cmds[handle] != NULL) {
3236 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
3237 ha->tgt.cmds[handle] = NULL;
3243 /* ha->hardware_lock supposed to be held on entry */
3244 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3245 uint32_t handle, void *ctio)
3247 struct qla_tgt_cmd *cmd = NULL;
3249 /* Clear out internal marks */
3250 handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
3251 CTIO_INTERMEDIATE_HANDLE_MARK);
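/*
 * Handle encoding (summary): CTIO_COMPLETION_HANDLE_MARK tags handles for
 * which the driver stored the command in ha->tgt.cmds[] and expects a
 * completion; CTIO_INTERMEDIATE_HANDLE_MARK is set on the data CTIO of a
 * data+status pair so that only the trailing status CTIO completes the
 * command (see qlt_do_ctio_completion()). Both marks are stripped here
 * before the handle is used as an array index.
 */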
3253 if (handle != QLA_TGT_NULL_HANDLE) {
3254 if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
3257 /* handle-1 is actually used */
3258 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
3259 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3260 "qla_target(%d): Wrong handle %x received\n",
3261 vha->vp_idx, handle);
3264 cmd = qlt_get_cmd(vha, handle);
3265 if (unlikely(cmd == NULL)) {
3266 ql_dbg(ql_dbg_tgt, vha, 0xe053,
3267 "qla_target(%d): Suspicious: unable to "
3268 "find the command with handle %x\n", vha->vp_idx,
3272 } else if (ctio != NULL) {
3273 /* We can't get loop ID from CTIO7 */
3274 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3275 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3276 "support NULL handles\n", vha->vp_idx);
3283 /* hardware_lock should be held by caller. */
3285 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3287 struct qla_hw_data *ha = vha->hw;
3291 qlt_unmap_sg(vha, cmd);
3293 handle = qlt_make_handle(vha);
3295 /* TODO: fix debug message type and ids. */
3296 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3297 ql_dbg(ql_dbg_io, vha, 0xff00,
3298 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
3299 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3300 cmd->write_data_transferred = 0;
3301 cmd->state = QLA_TGT_STATE_DATA_IN;
3303 ql_dbg(ql_dbg_io, vha, 0xff01,
3304 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
3306 ha->tgt.tgt_ops->handle_data(cmd);
3308 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3309 ql_dbg(ql_dbg_io, vha, 0xff02,
3310 "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
3312 ql_dbg(ql_dbg_io, vha, 0xff03,
3313 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
3318 cmd->cmd_flags |= BIT_17;
3319 ha->tgt.tgt_ops->free_cmd(cmd);
3323 qlt_host_reset_handler(struct qla_hw_data *ha)
3325 struct qla_tgt_cmd *cmd;
3326 unsigned long flags;
3327 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3328 scsi_qla_host_t *vha = NULL;
3329 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
3332 if (!base_vha->hw->tgt.tgt_ops)
3335 if (!tgt || qla_ini_mode_enabled(base_vha)) {
3336 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
3337 "Target mode disabled\n");
3341 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
3342 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
3343 base_vha->dpc_flags);
3345 spin_lock_irqsave(&ha->hardware_lock, flags);
3346 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
3347 cmd = qlt_get_cmd(base_vha, i);
3350 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3352 qlt_abort_cmd_on_host_reset(vha, cmd);
3354 spin_unlock_irqrestore(&ha->hardware_lock, flags);
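/*
 * CTIO completion path (summary of the function below): intermediate
 * handles are ignored unless they carry an error, the command is looked up
 * and its SG list unmapped, error statuses are classified (resets, logouts,
 * SRR, DIF errors, ...), and finally the command is handed back either via
 * tgt_ops->handle_data() for completed write data or tgt_ops->free_cmd()
 * for a finished response.
 */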
3359 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3361 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3362 uint32_t status, void *ctio)
3364 struct qla_hw_data *ha = vha->hw;
3365 struct se_cmd *se_cmd;
3366 const struct target_core_fabric_ops *tfo;
3367 struct qla_tgt_cmd *cmd;
3369 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3370 /* That could happen only in case of an error/reset/abort */
3371 if (status != CTIO_SUCCESS) {
3372 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3373 "Intermediate CTIO received"
3374 " (status %x)\n", status);
3379 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
3383 se_cmd = &cmd->se_cmd;
3384 tfo = se_cmd->se_tfo;
3385 cmd->cmd_sent_to_fw = 0;
3387 qlt_unmap_sg(vha, cmd);
3389 if (unlikely(status != CTIO_SUCCESS)) {
3390 switch (status & 0xFFFF) {
3391 case CTIO_LIP_RESET:
3392 case CTIO_TARGET_RESET:
3394 /* driver requested abort via Terminate Exchange */
3396 case CTIO_INVALID_RX_ID:
3398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3399 "qla_target(%d): CTIO with "
3400 "status %#x received, state %x, se_cmd %p, "
3401 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3402 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3403 status, cmd->state, se_cmd);
3406 case CTIO_PORT_LOGGED_OUT:
3407 case CTIO_PORT_UNAVAILABLE:
3408 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3409 "qla_target(%d): CTIO with PORT LOGGED "
3410 "OUT (29) or PORT UNAVAILABLE (28) status %x "
3411 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3412 status, cmd->state, se_cmd);
3415 case CTIO_SRR_RECEIVED:
3416 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
3417 "qla_target(%d): CTIO with SRR_RECEIVED"
3418 " status %x received (state %x, se_cmd %p)\n",
3419 vha->vp_idx, status, cmd->state, se_cmd);
3420 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
3425 case CTIO_DIF_ERROR: {
3426 struct ctio_crc_from_fw *crc =
3427 (struct ctio_crc_from_fw *)ctio;
3428 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3429 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
3430 vha->vp_idx, status, cmd->state, se_cmd,
3431 *((u64 *)&crc->actual_dif[0]),
3432 *((u64 *)&crc->expected_dif[0]));
3434 if (qlt_handle_dif_error(vha, cmd, ctio)) {
3435 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3436 /* scsi Write/xfer rdy complete */
3439 /* scsi read/xmit respond complete
3440 * call handle dif to send scsi status
3441 * rather than terminate exchange.
3443 cmd->state = QLA_TGT_STATE_PROCESSED;
3444 ha->tgt.tgt_ops->handle_dif_err(cmd);
3448 /* Need to generate a SCSI good completion.
3449 * because FW did not send scsi status.
3457 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3458 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3459 vha->vp_idx, status, cmd->state, se_cmd);
3464 /* "cmd->state == QLA_TGT_STATE_ABORTED" means
3465 * cmd is already aborted/terminated, we don't
3466 * need to terminate again. The exchange is already
3467 * cleaned up/freed at FW level. Just clean up at the driver level.
3470 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3471 (cmd->state != QLA_TGT_STATE_ABORTED)) {
3472 cmd->cmd_flags |= BIT_13;
3473 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3479 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3480 cmd->cmd_flags |= BIT_12;
3481 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3484 cmd->state = QLA_TGT_STATE_DATA_IN;
3486 if (unlikely(status != CTIO_SUCCESS))
3489 cmd->write_data_transferred = 1;
3491 ha->tgt.tgt_ops->handle_data(cmd);
3493 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3494 cmd->cmd_flags |= BIT_18;
3495 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3496 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3498 cmd->cmd_flags |= BIT_19;
3499 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3500 "qla_target(%d): A command in state (%d) should "
3501 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
3504 if (unlikely(status != CTIO_SUCCESS) &&
3505 (cmd->state != QLA_TGT_STATE_ABORTED)) {
3506 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
3510 ha->tgt.tgt_ops->free_cmd(cmd);
3513 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3518 switch (task_codes) {
3519 case ATIO_SIMPLE_QUEUE:
3520 fcp_task_attr = TCM_SIMPLE_TAG;
3522 case ATIO_HEAD_OF_QUEUE:
3523 fcp_task_attr = TCM_HEAD_TAG;
3525 case ATIO_ORDERED_QUEUE:
3526 fcp_task_attr = TCM_ORDERED_TAG;
3528 case ATIO_ACA_QUEUE:
3529 fcp_task_attr = TCM_ACA_TAG;
3532 fcp_task_attr = TCM_SIMPLE_TAG;
3535 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3536 "qla_target: unknown task code %x, use ORDERED instead\n",
3538 fcp_task_attr = TCM_ORDERED_TAG;
3542 return fcp_task_attr;
3545 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
3548 * Process context for I/O path into tcm_qla2xxx code
3550 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3552 scsi_qla_host_t *vha = cmd->vha;
3553 struct qla_hw_data *ha = vha->hw;
3554 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3555 struct qla_tgt_sess *sess = cmd->sess;
3556 struct atio_from_isp *atio = &cmd->atio;
3558 unsigned long flags;
3559 uint32_t data_length;
3560 int ret, fcp_task_attr, data_dir, bidi = 0;
3563 cmd->cmd_flags |= BIT_1;
3567 if (cmd->state == QLA_TGT_STATE_ABORTED) {
3568 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3569 "cmd with tag %u is aborted\n",
3570 cmd->atio.u.isp24.exchange_addr);
3574 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3575 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3576 cmd->unpacked_lun = scsilun_to_int(
3577 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
3579 if (atio->u.isp24.fcp_cmnd.rddata &&
3580 atio->u.isp24.fcp_cmnd.wrdata) {
3582 data_dir = DMA_TO_DEVICE;
3583 } else if (atio->u.isp24.fcp_cmnd.rddata)
3584 data_dir = DMA_FROM_DEVICE;
3585 else if (atio->u.isp24.fcp_cmnd.wrdata)
3586 data_dir = DMA_TO_DEVICE;
3588 data_dir = DMA_NONE;
3590 fcp_task_attr = qlt_get_fcp_task_attr(vha,
3591 atio->u.isp24.fcp_cmnd.task_attr);
3592 data_length = be32_to_cpu(get_unaligned((uint32_t *)
3593 &atio->u.isp24.fcp_cmnd.add_cdb[
3594 atio->u.isp24.fcp_cmnd.add_cdb_len]));
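/*
 * FCP_DL (the expected data length) is the 4-byte big-endian field that
 * immediately follows the additional CDB bytes in the FCP_CMND payload;
 * since add_cdb_len varies per command, the driver indexes past the
 * variable part and reads the field with get_unaligned().
 */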
3596 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3597 fcp_task_attr, data_dir, bidi);
3601 * Drop extra session reference from qlt_handle_cmd_for_atio().
3603 spin_lock_irqsave(&ha->hardware_lock, flags);
3604 ha->tgt.tgt_ops->put_sess(sess);
3605 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3609 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
3611 * cmd has not been sent to the target yet, so pass NULL as the second
3612 * argument to qlt_send_term_exchange() and free the memory here.
3614 cmd->cmd_flags |= BIT_2;
3615 spin_lock_irqsave(&ha->hardware_lock, flags);
3616 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
3618 qlt_decr_num_pend_cmds(vha);
3619 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3620 ha->tgt.tgt_ops->put_sess(sess);
3621 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3624 static void qlt_do_work(struct work_struct *work)
3626 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3627 scsi_qla_host_t *vha = cmd->vha;
3628 unsigned long flags;
3630 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3631 list_del(&cmd->cmd_list);
3632 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3637 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3638 struct qla_tgt_sess *sess,
3639 struct atio_from_isp *atio)
3641 struct se_session *se_sess = sess->se_sess;
3642 struct qla_tgt_cmd *cmd;
3645 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
3649 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
3650 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
3652 memcpy(&cmd->atio, atio, sizeof(*atio));
3653 cmd->state = QLA_TGT_STATE_NEW;
3654 cmd->tgt = vha->vha_tgt.qla_tgt;
3655 qlt_incr_num_pend_cmds(vha);
3657 cmd->se_cmd.map_tag = tag;
3659 cmd->loop_id = sess->loop_id;
3660 cmd->conf_compl_supported = sess->conf_compl_supported;
3663 cmd->jiffies_at_alloc = get_jiffies_64();
3665 cmd->reset_count = vha->hw->chip_reset;
3670 static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
3673 static void qlt_create_sess_from_atio(struct work_struct *work)
3675 struct qla_tgt_sess_op *op = container_of(work,
3676 struct qla_tgt_sess_op, work);
3677 scsi_qla_host_t *vha = op->vha;
3678 struct qla_hw_data *ha = vha->hw;
3679 struct qla_tgt_sess *sess;
3680 struct qla_tgt_cmd *cmd;
3681 unsigned long flags;
3682 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3684 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3685 list_del(&op->cmd_list);
3686 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3689 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3690 "sess_op with tag %u is aborted\n",
3691 op->atio.u.isp24.exchange_addr);
3695 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3696 "qla_target(%d): Unable to find wwn login"
3697 " (s_id %x:%x:%x), trying to create it manually\n",
3698 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3700 if (op->atio.u.raw.entry_count > 1) {
3701 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3702 "Dropping multy entry atio %p\n", &op->atio);
3706 mutex_lock(&vha->vha_tgt.tgt_mutex);
3707 sess = qlt_make_local_sess(vha, s_id);
3708 /* sess has an extra creation ref. */
3709 mutex_unlock(&vha->vha_tgt.tgt_mutex);
3714 * Now obtain a pre-allocated session tag using the original op->atio
3715 * packet header, and dispatch into __qlt_do_work() using the existing session.
3718 cmd = qlt_get_tag(vha, sess, &op->atio);
3720 spin_lock_irqsave(&ha->hardware_lock, flags);
3721 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
3722 ha->tgt.tgt_ops->put_sess(sess);
3723 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3728 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
3729 * the extra reference taken above by qlt_make_local_sess()
3736 spin_lock_irqsave(&ha->hardware_lock, flags);
3737 qlt_send_term_exchange(vha, NULL, &op->atio, 1);
3738 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3743 /* ha->hardware_lock supposed to be held on entry */
3744 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3745 struct atio_from_isp *atio)
3747 struct qla_hw_data *ha = vha->hw;
3748 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3749 struct qla_tgt_sess *sess;
3750 struct qla_tgt_cmd *cmd;
3752 if (unlikely(tgt->tgt_stop)) {
3753 ql_dbg(ql_dbg_io, vha, 0x3061,
3754 "New command while device %p is shutting down\n", tgt);
3758 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
3759 if (unlikely(!sess)) {
3760 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
3765 memcpy(&op->atio, atio, sizeof(*atio));
3768 spin_lock(&vha->cmd_list_lock);
3769 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3770 spin_unlock(&vha->cmd_list_lock);
3772 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3773 queue_work(qla_tgt_wq, &op->work);
3777 /* Another WWN used to have our s_id. Our PLOGI scheduled its
3778 * session deletion, but it's still in sess_del_work wq */
3779 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3780 ql_dbg(ql_dbg_io, vha, 0x3061,
3781 "New command while old session %p is being deleted\n",
3787 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3789 kref_get(&sess->se_sess->sess_kref);
3791 cmd = qlt_get_tag(vha, sess, atio);
3793 ql_dbg(ql_dbg_io, vha, 0x3062,
3794 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
3795 ha->tgt.tgt_ops->put_sess(sess);
3800 cmd->cmd_flags |= BIT_0;
3802 spin_lock(&vha->cmd_list_lock);
3803 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
3804 spin_unlock(&vha->cmd_list_lock);
3806 INIT_WORK(&cmd->work, qlt_do_work);
3807 queue_work(qla_tgt_wq, &cmd->work);
3812 /* ha->hardware_lock supposed to be held on entry */
3813 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3814 int fn, void *iocb, int flags)
3816 struct scsi_qla_host *vha = sess->vha;
3817 struct qla_hw_data *ha = vha->hw;
3818 struct qla_tgt_mgmt_cmd *mcmd;
3819 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3823 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
3825 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
3826 "qla_target(%d): Allocation of management "
3827 "command failed, some commands and their data could "
3828 "leak\n", vha->vp_idx);
3831 memset(mcmd, 0, sizeof(*mcmd));
3835 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
3836 sizeof(mcmd->orig_iocb.imm_ntfy));
3838 mcmd->tmr_func = fn;
3839 mcmd->flags = flags;
3840 mcmd->reset_count = vha->hw->chip_reset;
3843 case QLA_TGT_CLEAR_ACA:
3844 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
3845 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
3846 tmr_func = TMR_CLEAR_ACA;
3849 case QLA_TGT_TARGET_RESET:
3850 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
3851 "qla_target(%d): TARGET_RESET received\n",
3853 tmr_func = TMR_TARGET_WARM_RESET;
3856 case QLA_TGT_LUN_RESET:
3857 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3858 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3859 tmr_func = TMR_LUN_RESET;
3860 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
3863 case QLA_TGT_CLEAR_TS:
3864 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
3865 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
3866 tmr_func = TMR_CLEAR_TASK_SET;
3869 case QLA_TGT_ABORT_TS:
3870 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
3871 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
3872 tmr_func = TMR_ABORT_TASK_SET;
3875 case QLA_TGT_ABORT_ALL:
3876 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
3877 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
3882 case QLA_TGT_ABORT_ALL_SESS:
3883 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
3884 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
3889 case QLA_TGT_NEXUS_LOSS_SESS:
3890 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
3891 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
3896 case QLA_TGT_NEXUS_LOSS:
3897 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
3898 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
3903 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
3904 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
3905 sess->vha->vp_idx, fn);
3906 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3910 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
3912 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
3913 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
3914 sess->vha->vp_idx, res);
3915 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3922 /* ha->hardware_lock supposed to be held on entry */
3923 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3925 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3926 struct qla_hw_data *ha = vha->hw;
3927 struct qla_tgt *tgt;
3928 struct qla_tgt_sess *sess;
3929 uint32_t lun, unpacked_lun;
3932 tgt = vha->vha_tgt.qla_tgt;
3934 lun = a->u.isp24.fcp_cmnd.lun;
3935 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
3936 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
3937 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3938 a->u.isp24.fcp_hdr.s_id);
3939 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3942 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
3943 "qla_target(%d): task mgmt fn 0x%x for "
3944 "non-existant session\n", vha->vp_idx, fn);
3945 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
3946 sizeof(struct atio_from_isp));
3949 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
3952 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3955 /* ha->hardware_lock supposed to be held on entry */
3956 static int __qlt_abort_task(struct scsi_qla_host *vha,
3957 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
3959 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3960 struct qla_hw_data *ha = vha->hw;
3961 struct qla_tgt_mgmt_cmd *mcmd;
3962 uint32_t lun, unpacked_lun;
3965 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
3967 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
3968 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
3969 vha->vp_idx, __func__);
3972 memset(mcmd, 0, sizeof(*mcmd));
3975 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
3976 sizeof(mcmd->orig_iocb.imm_ntfy));
3978 lun = a->u.isp24.fcp_cmnd.lun;
3979 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3980 mcmd->reset_count = vha->hw->chip_reset;
3982 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
3983 le16_to_cpu(iocb->u.isp2x.seq_id));
3985 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
3986 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
3988 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3995 /* ha->hardware_lock supposed to be held on entry */
3996 static int qlt_abort_task(struct scsi_qla_host *vha,
3997 struct imm_ntfy_from_isp *iocb)
3999 struct qla_hw_data *ha = vha->hw;
4000 struct qla_tgt_sess *sess;
4003 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4005 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4007 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4008 "qla_target(%d): task abort for unexisting "
4009 "session\n", vha->vp_idx);
4010 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4011 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4014 return __qlt_abort_task(vha, iocb, sess);
4017 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4019 if (fcport->tgt_session) {
4020 if (rc != MBS_COMMAND_COMPLETE) {
4021 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4022 "%s: se_sess %p / sess %p from"
4023 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4024 " LOGO failed: %#x\n",
4026 fcport->tgt_session->se_sess,
4027 fcport->tgt_session,
4028 fcport->port_name, fcport->loop_id,
4029 fcport->d_id.b.domain, fcport->d_id.b.area,
4030 fcport->d_id.b.al_pa, rc);
4033 fcport->tgt_session->logout_completed = 1;
4037 static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
4038 struct imm_ntfy_from_isp *b)
4040 struct imm_ntfy_from_isp tmp;
4041 memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
4042 memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
4043 memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
4047 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4049 * Schedules sessions with matching port_id/loop_id but different wwn for
4050 * deletion. Returns existing session with matching wwn if present.
4053 static struct qla_tgt_sess *
4054 qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4055 port_id_t port_id, uint16_t loop_id)
4057 struct qla_tgt_sess *sess = NULL, *other_sess;
4060 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4062 other_wwn = wwn_to_u64(other_sess->port_name);
4064 if (wwn == other_wwn) {
4070 /* find other sess with nport_id collision */
4071 if (port_id.b24 == other_sess->s_id.b24) {
4072 if (loop_id != other_sess->loop_id) {
4073 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4074 "Invalidating sess %p loop_id %d wwn %llx.\n",
4075 other_sess, other_sess->loop_id, other_wwn);
4078 * logout_on_delete is set by default, but another
4079 * session that has the same s_id/loop_id combo
4080 * might have cleared it when it requested this session
4081 * deletion, so don't touch it
4083 qlt_schedule_sess_for_deletion(other_sess, true);
4086 * Another wwn used to have our s_id/loop_id
4087 * combo - kill the session, but don't log out
4089 sess->logout_on_delete = 0;
4090 qlt_schedule_sess_for_deletion(other_sess,
4096 /* find other sess with nport handle collision */
4097 if (loop_id == other_sess->loop_id) {
4098 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4099 "Invalidating sess %p loop_id %d wwn %llx.\n",
4100 other_sess, other_sess->loop_id, other_wwn);
4102 /* Same loop_id but different s_id:
4103 * OK to kill it and log it out */
4104 qlt_schedule_sess_for_deletion(other_sess, true);
4111 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4112 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4114 struct qla_tgt_sess_op *op;
4115 struct qla_tgt_cmd *cmd;
4119 key = (((u32)s_id->b.domain << 16) |
4120 ((u32)s_id->b.area << 8) |
4121 ((u32)s_id->b.al_pa));
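/*
 * Illustrative example: a port id with domain 0x01, area 0x02, al_pa 0x03
 * packs to key 0x010203; the same packing is produced by sid_to_key() for
 * the s_id carried in each queued ATIO, so a simple equality test finds
 * every pending command from that initiator.
 */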
4123 spin_lock(&vha->cmd_list_lock);
4124 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4125 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4126 if (op_key == key) {
4131 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4132 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4133 if (cmd_key == key) {
4134 cmd->state = QLA_TGT_STATE_ABORTED;
4138 spin_unlock(&vha->cmd_list_lock);
4144 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4146 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4147 struct imm_ntfy_from_isp *iocb)
4149 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4150 struct qla_hw_data *ha = vha->hw;
4151 struct qla_tgt_sess *sess = NULL;
4158 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4160 port_id.b.domain = iocb->u.isp24.port_id[2];
4161 port_id.b.area = iocb->u.isp24.port_id[1];
4162 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4163 port_id.b.rsvd_1 = 0;
4165 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4167 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
4168 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
4169 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
4171 /* res = 1 means ack at the end of thread
4172 * res = 0 means ack async/later.
4174 switch (iocb->u.isp24.status_subcode) {
4177 /* Mark all stale commands in qla_tgt_wq for deletion */
4178 abort_cmds_for_s_id(vha, &port_id);
4181 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4184 if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
4189 if (sess->plogi_ack_needed) {
4191 * Initiator sent another PLOGI before last PLOGI could
4192 * finish. Swap plogi iocbs and terminate old one
4193 * without acking, new one will get acked when session
4194 * deletion completes.
4196 ql_log(ql_log_warn, sess->vha, 0xf094,
4197 "sess %p received double plogi.\n", sess);
4199 qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
4201 qlt_send_term_imm_notif(vha, iocb, 1);
4210 * Save immediate Notif IOCB for Ack when sess is done
4211 * and being deleted.
4213 memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
4214 sess->plogi_ack_needed = 1;
4217 * Under normal circumstances we want to release nport handle
4218 * during LOGO process to avoid nport handle leaks inside FW.
4219 * The exception is when LOGO is done while another PLOGI with
4220 * the same nport handle is waiting as might be the case here.
4221 * Note: there is always a possibility of a race where session
4222 * deletion has already started for other reasons (e.g. ACL
4223 * removal) and now PLOGI arrives:
4224 * 1. if PLOGI arrived in FW after nport handle has been freed,
4225 * FW must have assigned this PLOGI a new/same handle and we
4226 * can proceed ACK'ing it as usual when session deletion
4228 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4229 * bit reached it, the handle has now been released. We'll
4230 * get an error when we ACK this PLOGI. Nothing will be sent
4231 * back to initiator. Initiator should eventually retry
4232 * PLOGI and situation will correct itself.
4234 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4235 (sess->s_id.b24 == port_id.b24));
4236 qlt_schedule_sess_for_deletion(sess, true);
4240 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4243 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4247 if (sess->deleted) {
4249 * Impatient initiator sent PRLI before last
4250 * PLOGI could finish. Force the initiator to retry
4251 * while the last one finishes.
4253 ql_log(ql_log_warn, sess->vha, 0xf095,
4254 "sess %p PRLI received, before plogi ack.\n",
4256 qlt_send_term_imm_notif(vha, iocb, 1);
4262 * This shouldn't happen under normal circumstances,
4263 * since we have deleted the old session during PLOGI
4265 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4266 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4267 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4270 sess->loop_id = loop_id;
4271 sess->s_id = port_id;
4274 sess->conf_compl_supported = 1;
4277 res = 1; /* send notify ack */
4279 /* Make session global (not used in fabric mode) */
4280 if (ha->current_topology != ISP_CFG_F) {
4281 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4282 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4283 qla2xxx_wake_dpc(vha);
4285 /* todo: else - create sess here. */
4286 res = 1; /* send notify ack */
4293 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4298 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4299 if (tgt->link_reinit_iocb_pending) {
4300 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
4302 tgt->link_reinit_iocb_pending = 0;
4304 res = 1; /* send notify ack */
4308 case ELS_FLOGI: /* should never happen */
4310 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
4311 "qla_target(%d): Unsupported ELS command %x "
4312 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
4313 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
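/*
 * Rebuild cmd->sg so a data transfer can be restarted at the SRR-supplied
 * relative offset: find the scatterlist element that contains 'offset',
 * allocate a fresh list starting there (trimming the first element by the
 * remaining offset) and update cmd->sg/sg_cnt/bufflen/offset to match.
 * Note: non-zero offsets are currently rejected, see the FIXME below.
 */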
4320 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
4324 * FIXME: Reject non-zero SRR relative offset until we can test
4325 * this code properly.
4327 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
4330 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
4331 size_t first_offset = 0, rem_offset = offset, tmp = 0;
4332 int i, sg_srr_cnt, bufflen = 0;
4334 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
4335 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
4336 "cmd->sg_cnt: %u, direction: %d\n",
4337 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
4339 if (!cmd->sg || !cmd->sg_cnt) {
4340 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
4341 "Missing cmd->sg or zero cmd->sg_cnt in"
4342 " qla_tgt_set_data_offset\n");
4346 * Walk the current cmd->sg list until we locate the new sg_srr_start
4348 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
4349 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
4350 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
4351 i, sg, sg_page(sg), sg->length, sg->offset);
4353 if ((sg->length + tmp) > offset) {
4354 first_offset = rem_offset;
4356 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
4357 "Found matching sg[%d], using %p as sg_srr_start, "
4358 "and using first_offset: %zu\n", i, sg,
4363 rem_offset -= sg->length;
4366 if (!sg_srr_start) {
4367 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
4368 "Unable to locate sg_srr_start for offset: %u\n", offset);
4371 sg_srr_cnt = (cmd->sg_cnt - i);
4373 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
4375 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
4376 "Unable to allocate sgp\n");
4379 sg_init_table(sg_srr, sg_srr_cnt);
4382 * Walk the remaining list for sg_srr_start, mapping to the newly
4383 * allocated sg_srr taking first_offset into account.
4385 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
4387 sg_set_page(sgp, sg_page(sg),
4388 (sg->length - first_offset), first_offset);
4391 sg_set_page(sgp, sg_page(sg), sg->length, 0);
4393 bufflen += sgp->length;
4401 cmd->sg_cnt = sg_srr_cnt;
4402 cmd->bufflen = bufflen;
4403 cmd->offset += offset;
4406 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
4407 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
4409 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
4411 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
4414 if (cmd->sg_cnt < 0)
4417 if (cmd->bufflen < 0)
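/*
 * Translate the initiator's SRR relative offset into an offset within the
 * current command buffer and choose what to retransmit: everything
 * (QLA_TGT_XMIT_ALL), status only, or a trimmed data range set up via
 * qlt_set_data_offset().  A negative relative offset is an error.
 */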
4424 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
4425 uint32_t srr_rel_offs, int *xmit_type)
4427 int res = 0, rel_offs;
4429 rel_offs = srr_rel_offs - cmd->offset;
4430 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
4431 srr_rel_offs, rel_offs);
4433 *xmit_type = QLA_TGT_XMIT_ALL;
4436 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
4437 "qla_target(%d): SRR rel_offs (%d) < 0",
4438 cmd->vha->vp_idx, rel_offs);
4440 } else if (rel_offs == cmd->bufflen)
4441 *xmit_type = QLA_TGT_XMIT_STATUS;
4442 else if (rel_offs > 0)
4443 res = qlt_set_data_offset(cmd, rel_offs);
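/*
 * Handle a matched CTIO SRR / immediate notify pair.  Depending on srr_ui
 * this ACKs the notify and retransmits status, or validates the command's
 * SGL and SCSI status, adjusts the data offset and re-drives the data
 * transfer; an SRR that cannot be honoured is rejected and the exchange
 * may be terminated.
 */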
4448 /* No locks, thread context */
4449 static void qlt_handle_srr(struct scsi_qla_host *vha,
4450 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
4452 struct imm_ntfy_from_isp *ntfy =
4453 (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
4454 struct qla_hw_data *ha = vha->hw;
4455 struct qla_tgt_cmd *cmd = sctio->cmd;
4456 struct se_cmd *se_cmd = &cmd->se_cmd;
4457 unsigned long flags;
4458 int xmit_type = 0, resp = 0;
4462 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
4463 srr_ui = ntfy->u.isp24.srr_ui;
4465 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
4470 spin_lock_irqsave(&ha->hardware_lock, flags);
4471 qlt_send_notify_ack(vha, ntfy,
4472 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4473 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4474 xmit_type = QLA_TGT_XMIT_STATUS;
4477 case SRR_IU_DATA_IN:
4478 if (!cmd->sg || !cmd->sg_cnt) {
4479 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
4480 "Unable to process SRR_IU_DATA_IN due to"
4481 " missing cmd->sg, state: %d\n", cmd->state);
4485 if (se_cmd->scsi_status != 0) {
4486 ql_dbg(ql_dbg_tgt, vha, 0xe02a,
4487 "Rejecting SRR_IU_DATA_IN with non GOOD "
4491 cmd->bufflen = se_cmd->data_length;
4493 if (qlt_has_data(cmd)) {
4494 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
4496 spin_lock_irqsave(&ha->hardware_lock, flags);
4497 qlt_send_notify_ack(vha, ntfy,
4498 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4502 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
4503 "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
4504 vha->vp_idx, se_cmd->tag,
4505 cmd->se_cmd.scsi_status);
4509 case SRR_IU_DATA_OUT:
4510 if (!cmd->sg || !cmd->sg_cnt) {
4511 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
4512 "Unable to process SRR_IU_DATA_OUT due to"
4513 " missing cmd->sg\n");
4517 if (se_cmd->scsi_status != 0) {
4518 ql_dbg(ql_dbg_tgt, vha, 0xe02b,
4519 "Rejecting SRR_IU_DATA_OUT"
4520 " with non GOOD scsi_status\n");
4523 cmd->bufflen = se_cmd->data_length;
4525 if (qlt_has_data(cmd)) {
4526 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
4528 spin_lock_irqsave(&ha->hardware_lock, flags);
4529 qlt_send_notify_ack(vha, ntfy,
4530 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4531 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4532 if (xmit_type & QLA_TGT_XMIT_DATA) {
4533 cmd->cmd_flags |= BIT_8;
4534 qlt_rdy_to_xfer(cmd);
4537 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
4538 "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
4539 vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
4544 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
4545 "qla_target(%d): Unknown srr_ui value %x",
4546 vha->vp_idx, srr_ui);
4550 /* Transmit the response for the status and data-in cases */
4552 cmd->cmd_flags |= BIT_7;
4553 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
4559 spin_lock_irqsave(&ha->hardware_lock, flags);
4560 qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
4561 NOTIFY_ACK_SRR_FLAGS_REJECT,
4562 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4563 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4564 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4565 cmd->state = QLA_TGT_STATE_DATA_IN;
4568 cmd->cmd_flags |= BIT_9;
4569 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
4571 spin_unlock_irqrestore(&ha->hardware_lock, flags);
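/*
 * Reject an immediate notify SRR with a NOTIFY ACK ("unable to perform")
 * and release the entry.  'ha_locked' indicates whether the caller already
 * holds ha->hardware_lock.
 */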
4574 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
4575 struct qla_tgt_srr_imm *imm, int ha_locked)
4577 struct qla_hw_data *ha = vha->hw;
4578 unsigned long flags = 0;
4581 spin_lock_irqsave(&ha->hardware_lock, flags);
4583 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
4584 NOTIFY_ACK_SRR_FLAGS_REJECT,
4585 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4586 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4589 spin_unlock_irqrestore(&ha->hardware_lock, flags);
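/*
 * srr_work handler: under srr_lock, pair each queued CTIO SRR with its
 * immediate notify SRR (matched by srr_id), restore the command's SGL from
 * the se_cmd, and process the pair with qlt_handle_srr() in thread context.
 */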
4594 static void qlt_handle_srr_work(struct work_struct *work)
4596 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
4597 struct scsi_qla_host *vha = tgt->vha;
4598 struct qla_tgt_srr_ctio *sctio;
4599 unsigned long flags;
4601 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
4605 spin_lock_irqsave(&tgt->srr_lock, flags);
4606 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
4607 struct qla_tgt_srr_imm *imm, *i, *ti;
4608 struct qla_tgt_cmd *cmd;
4609 struct se_cmd *se_cmd;
4612 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
4614 if (i->srr_id == sctio->srr_id) {
4615 list_del(&i->srr_list_entry);
4617 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
4618 "qla_target(%d): There must be "
4619 "only one IMM SRR per CTIO SRR "
4620 "(IMM SRR %p, id %d, CTIO %p\n",
4621 vha->vp_idx, i, i->srr_id, sctio);
4622 qlt_reject_free_srr_imm(tgt->vha, i, 0);
4628 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
4629 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
4633 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
4634 "Not found matching IMM for SRR CTIO (id %d)\n",
4638 list_del(&sctio->srr_list_entry);
4640 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4644 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
4645 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
4654 se_cmd = &cmd->se_cmd;
4656 cmd->sg_cnt = se_cmd->t_data_nents;
4657 cmd->sg = se_cmd->t_data_sg;
4659 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
4660 "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
4661 cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
4662 se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
4664 qlt_handle_srr(vha, sctio, imm);
4670 spin_unlock_irqrestore(&tgt->srr_lock, flags);
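/*
 * IRQ-side handling of an SRR immediate notify: copy the IOCB into a
 * qla_tgt_srr_imm, queue it on srr_imm_list and, once the matching CTIO
 * SRR has arrived, schedule srr_work.  If the entry cannot be allocated,
 * the SRR is rejected and the exchange of any matching CTIO SRR terminated.
 */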
4673 /* ha->hardware_lock supposed to be held on entry */
4674 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4675 struct imm_ntfy_from_isp *iocb)
4677 struct qla_tgt_srr_imm *imm;
4678 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4679 struct qla_tgt_srr_ctio *sctio;
4683 ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
4686 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
4688 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
4690 /* IRQ is already OFF */
4691 spin_lock(&tgt->srr_lock);
4692 imm->srr_id = tgt->imm_srr_id;
4693 list_add_tail(&imm->srr_list_entry,
4694 &tgt->srr_imm_list);
4695 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
4696 "IMM NTFY SRR %p added (id %d, ui %x)\n",
4697 imm, imm->srr_id, iocb->u.isp24.srr_ui);
4698 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
4700 list_for_each_entry(sctio, &tgt->srr_ctio_list,
4702 if (sctio->srr_id == imm->srr_id) {
4708 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
4709 "Scheduling srr work\n");
4710 schedule_work(&tgt->srr_work);
4712 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
4713 "qla_target(%d): imm_srr_id "
4714 "== ctio_srr_id (%d), but there is no "
4715 "corresponding SRR CTIO, deleting IMM "
4716 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
4718 list_del(&imm->srr_list_entry);
4722 spin_unlock(&tgt->srr_lock);
4726 spin_unlock(&tgt->srr_lock);
4728 struct qla_tgt_srr_ctio *ts;
4730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
4731 "qla_target(%d): Unable to allocate SRR IMM "
4732 "entry, SRR request will be rejected\n", vha->vp_idx);
4734 /* IRQ is already OFF */
4735 spin_lock(&tgt->srr_lock);
4736 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
4738 if (sctio->srr_id == tgt->imm_srr_id) {
4739 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
4740 "CTIO SRR %p deleted (id %d)\n",
4741 sctio, sctio->srr_id);
4742 list_del(&sctio->srr_list_entry);
4743 qlt_send_term_exchange(vha, sctio->cmd,
4744 &sctio->cmd->atio, 1);
4748 spin_unlock(&tgt->srr_lock);
4755 qlt_send_notify_ack(vha, iocb, 0, 0, 0,
4756 NOTIFY_ACK_SRR_FLAGS_REJECT,
4757 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4758 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4762 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4764 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
4765 struct imm_ntfy_from_isp *iocb)
4767 struct qla_hw_data *ha = vha->hw;
4768 uint32_t add_flags = 0;
4769 int send_notify_ack = 1;
4772 status = le16_to_cpu(iocb->u.isp2x.status);
4774 case IMM_NTFY_LIP_RESET:
4776 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
4777 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
4778 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
4779 iocb->u.isp24.status_subcode);
4781 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4782 send_notify_ack = 0;
4786 case IMM_NTFY_LIP_LINK_REINIT:
4788 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4789 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
4790 "qla_target(%d): LINK REINIT (loop %#x, "
4791 "subcode %x)\n", vha->vp_idx,
4792 le16_to_cpu(iocb->u.isp24.nport_handle),
4793 iocb->u.isp24.status_subcode);
4794 if (tgt->link_reinit_iocb_pending) {
4795 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
4798 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
4799 tgt->link_reinit_iocb_pending = 1;
4801 * QLogic requires waiting after LINK REINIT for possible
4802 * PDISC or ADISC ELS commands
4804 send_notify_ack = 0;
4808 case IMM_NTFY_PORT_LOGOUT:
4809 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
4810 "qla_target(%d): Port logout (loop "
4811 "%#x, subcode %x)\n", vha->vp_idx,
4812 le16_to_cpu(iocb->u.isp24.nport_handle),
4813 iocb->u.isp24.status_subcode);
4815 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
4816 send_notify_ack = 0;
4817 /* The sessions will be cleared in the callback, if needed */
4820 case IMM_NTFY_GLBL_TPRLO:
4821 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
4822 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
4823 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4824 send_notify_ack = 0;
4825 /* The sessions will be cleared in the callback, if needed */
4828 case IMM_NTFY_PORT_CONFIG:
4829 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
4830 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
4832 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4833 send_notify_ack = 0;
4834 /* The sessions will be cleared in the callback, if needed */
4837 case IMM_NTFY_GLBL_LOGO:
4838 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
4839 "qla_target(%d): Link failure detected\n",
4841 /* I_T nexus loss */
4842 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4843 send_notify_ack = 0;
4846 case IMM_NTFY_IOCB_OVERFLOW:
4847 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
4848 "qla_target(%d): Cannot provide requested "
4849 "capability (IOCB overflowed the immediate notify "
4850 "resource count)\n", vha->vp_idx);
4853 case IMM_NTFY_ABORT_TASK:
4854 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
4855 "qla_target(%d): Abort Task (S %08x I %#x -> "
4856 "L %#x)\n", vha->vp_idx,
4857 le16_to_cpu(iocb->u.isp2x.seq_id),
4858 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
4859 le16_to_cpu(iocb->u.isp2x.lun));
4860 if (qlt_abort_task(vha, iocb) == 0)
4861 send_notify_ack = 0;
4864 case IMM_NTFY_RESOURCE:
4865 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
4866 "qla_target(%d): Out of resources, host %ld\n",
4867 vha->vp_idx, vha->host_no);
4870 case IMM_NTFY_MSG_RX:
4871 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
4872 "qla_target(%d): Immediate notify task %x\n",
4873 vha->vp_idx, iocb->u.isp2x.task_flags);
4874 if (qlt_handle_task_mgmt(vha, iocb) == 0)
4875 send_notify_ack = 0;
4879 if (qlt_24xx_handle_els(vha, iocb) == 0)
4880 send_notify_ack = 0;
4884 qlt_prepare_srr_imm(vha, iocb);
4885 send_notify_ack = 0;
4889 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
4890 "qla_target(%d): Received unknown immediate "
4891 "notify status %x\n", vha->vp_idx, status);
4895 if (send_notify_ack)
4896 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
4900 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4901 * This function sends busy to ISP 2xxx or 24xx.
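 * It builds a status-mode-1 CTIO type 7 (CTIO7_FLAGS_SEND_STATUS) carrying
 * the given SCSI status; if no session exists for the initiator's S_ID the
 * exchange is terminated instead.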
4903 static int __qlt_send_busy(struct scsi_qla_host *vha,
4904 struct atio_from_isp *atio, uint16_t status)
4906 struct ctio7_to_24xx *ctio24;
4907 struct qla_hw_data *ha = vha->hw;
4909 struct qla_tgt_sess *sess = NULL;
4911 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4912 atio->u.isp24.fcp_hdr.s_id);
4914 qlt_send_term_exchange(vha, NULL, atio, 1);
4917 /* Sending marker isn't necessary, since we are called from ISR */
4919 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
4921 ql_dbg(ql_dbg_io, vha, 0x3063,
4922 "qla_target(%d): %s failed: unable to allocate "
4923 "request packet", vha->vp_idx, __func__);
4927 pkt->entry_count = 1;
4928 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
4930 ctio24 = (struct ctio7_to_24xx *)pkt;
4931 ctio24->entry_type = CTIO_TYPE7;
4932 ctio24->nport_handle = sess->loop_id;
4933 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
4934 ctio24->vp_index = vha->vp_idx;
4935 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
4936 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
4937 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
4938 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
4939 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
4940 __constant_cpu_to_le16(
4941 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
4942 CTIO7_FLAGS_DONT_RET_CTIO);
4944 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
4945 * if explicit confirmation is used.
4947 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
4948 ctio24->u.status1.scsi_status = cpu_to_le16(status);
4949 /* Memory Barrier */
4951 qla2x00_start_iocbs(vha, vha->req);
4956 * This routine is used to allocate a command for either a QFull condition
4957 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
4961 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
4962 struct atio_from_isp *atio, uint16_t status, int qfull)
4964 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4965 struct qla_hw_data *ha = vha->hw;
4966 struct qla_tgt_sess *sess;
4967 struct se_session *se_sess;
4968 struct qla_tgt_cmd *cmd;
4971 if (unlikely(tgt->tgt_stop)) {
4972 ql_dbg(ql_dbg_io, vha, 0x300a,
4973 "New command while device %p is shutting down\n", tgt);
4977 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
4978 vha->hw->tgt.num_qfull_cmds_dropped++;
4979 if (vha->hw->tgt.num_qfull_cmds_dropped >
4980 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
4981 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
4982 vha->hw->tgt.num_qfull_cmds_dropped;
4984 ql_dbg(ql_dbg_io, vha, 0x3068,
4985 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
4986 vha->vp_idx, __func__,
4987 vha->hw->tgt.num_qfull_cmds_dropped);
4989 qlt_chk_exch_leak_thresh_hold(vha);
4993 sess = ha->tgt.tgt_ops->find_sess_by_s_id
4994 (vha, atio->u.isp24.fcp_hdr.s_id);
4998 se_sess = sess->se_sess;
5000 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
5004 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
5006 ql_dbg(ql_dbg_io, vha, 0x3009,
5007 "qla_target(%d): %s: Allocation of cmd failed\n",
5008 vha->vp_idx, __func__);
5010 vha->hw->tgt.num_qfull_cmds_dropped++;
5011 if (vha->hw->tgt.num_qfull_cmds_dropped >
5012 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
5013 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
5014 vha->hw->tgt.num_qfull_cmds_dropped;
5016 qlt_chk_exch_leak_thresh_hold(vha);
5020 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
5022 qlt_incr_num_pend_cmds(vha);
5023 INIT_LIST_HEAD(&cmd->cmd_list);
5024 memcpy(&cmd->atio, atio, sizeof(*atio));
5026 cmd->tgt = vha->vha_tgt.qla_tgt;
5028 cmd->reset_count = vha->hw->chip_reset;
5033 /* NOTE: borrowing the state field to carry the status */
5034 cmd->state = status;
5036 cmd->term_exchg = 1;
5038 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5040 vha->hw->tgt.num_qfull_cmds_alloc++;
5041 if (vha->hw->tgt.num_qfull_cmds_alloc >
5042 vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
5043 vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
5044 vha->hw->tgt.num_qfull_cmds_alloc;
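/*
 * Flush the deferred QFull commands: under hardware_lock, send the stored
 * busy or term-exchange IOCB for each entry on q_full_list, move the
 * handled entries to a local list, then release them outside the lock
 * (they were never handed to TCM).
 */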
5048 qlt_free_qfull_cmds(struct scsi_qla_host *vha)
5050 struct qla_hw_data *ha = vha->hw;
5051 unsigned long flags;
5052 struct qla_tgt_cmd *cmd, *tcmd;
5053 struct list_head free_list;
5056 if (list_empty(&ha->tgt.q_full_list))
5059 INIT_LIST_HEAD(&free_list);
5061 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
5063 if (list_empty(&ha->tgt.q_full_list)) {
5064 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
5068 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
5070 /* cmd->state is a borrowed field to hold status */
5071 rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
5072 else if (cmd->term_exchg)
5073 rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
5079 ql_dbg(ql_dbg_io, vha, 0x3006,
5080 "%s: busy sent for ox_id[%04x]\n", __func__,
5081 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5082 else if (cmd->term_exchg)
5083 ql_dbg(ql_dbg_io, vha, 0x3007,
5084 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5085 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5087 ql_dbg(ql_dbg_io, vha, 0x3008,
5088 "%s: Unexpected cmd in QFull list %p\n", __func__,
5091 list_del(&cmd->cmd_list);
5092 list_add_tail(&cmd->cmd_list, &free_list);
5094 /* piggyback on hardware_lock for protection */
5095 vha->hw->tgt.num_qfull_cmds_alloc--;
5097 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
5101 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5102 list_del(&cmd->cmd_list);
5103 /* This cmd was never sent to TCM. There is no need
5104 * to schedule free or call free_cmd
5112 qlt_send_busy(struct scsi_qla_host *vha,
5113 struct atio_from_isp *atio, uint16_t status)
5117 rc = __qlt_send_busy(vha, atio, status);
5119 qlt_alloc_qfull_cmd(vha, atio, status, 1);
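/*
 * If the number of pending commands has crossed the queue-full threshold,
 * answer the new ATIO with a busy status instead of queueing more work.
 */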
5123 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
5124 struct atio_from_isp *atio)
5126 struct qla_hw_data *ha = vha->hw;
5129 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5132 status = temp_sam_status;
5133 qlt_send_busy(vha, atio, status);
5137 /* ha->hardware_lock supposed to be held on entry */
5138 /* called via callback from qla2xxx */
5139 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5140 struct atio_from_isp *atio)
5142 struct qla_hw_data *ha = vha->hw;
5143 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5146 if (unlikely(tgt == NULL)) {
5147 ql_dbg(ql_dbg_io, vha, 0x3064,
5148 "ATIO pkt, but no tgt (ha %p)", ha);
5152 * In tgt_stop mode we should also allow all requests to pass.
5153 * Otherwise, some commands can get stuck.
5156 tgt->irq_cmd_count++;
5158 switch (atio->u.raw.entry_type) {
5160 if (unlikely(atio->u.isp24.exchange_addr ==
5161 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
5162 ql_dbg(ql_dbg_io, vha, 0x3065,
5163 "qla_target(%d): ATIO_TYPE7 "
5164 "received with UNKNOWN exchange address, "
5165 "sending QUEUE_FULL\n", vha->vp_idx);
5166 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
5172 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5173 rc = qlt_chk_qfull_thresh_hold(vha, atio);
5175 tgt->irq_cmd_count--;
5178 rc = qlt_handle_cmd_for_atio(vha, atio);
5180 rc = qlt_handle_task_mgmt(vha, atio);
5182 if (unlikely(rc != 0)) {
5184 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5185 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5187 qlt_send_term_exchange(vha, NULL, atio, 1);
5190 if (tgt->tgt_stop) {
5191 ql_dbg(ql_dbg_tgt, vha, 0xe059,
5192 "qla_target: Unable to send "
5193 "command to target for req, "
5196 ql_dbg(ql_dbg_tgt, vha, 0xe05a,
5197 "qla_target(%d): Unable to send "
5198 "command to target, sending BUSY "
5199 "status.\n", vha->vp_idx);
5200 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5206 case IMMED_NOTIFY_TYPE:
5208 if (unlikely(atio->u.isp2x.entry_status != 0)) {
5209 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5210 "qla_target(%d): Received ATIO packet %x "
5211 "with error status %x\n", vha->vp_idx,
5212 atio->u.raw.entry_type,
5213 atio->u.isp2x.entry_status);
5216 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5217 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5222 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5223 "qla_target(%d): Received unknown ATIO atio "
5224 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5228 tgt->irq_cmd_count--;
5231 /* ha->hardware_lock supposed to be held on entry */
5232 /* called via callback from qla2xxx */
5233 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5235 struct qla_hw_data *ha = vha->hw;
5236 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5238 if (unlikely(tgt == NULL)) {
5239 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5240 "qla_target(%d): Response pkt %x received, but no "
5241 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
5246 * In tgt_stop mode we should also allow all requests to pass.
5247 * Otherwise, some commands can get stuck.
5250 tgt->irq_cmd_count++;
5252 switch (pkt->entry_type) {
5256 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5257 qlt_do_ctio_completion(vha, entry->handle,
5258 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5263 case ACCEPT_TGT_IO_TYPE:
5265 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5267 if (atio->u.isp2x.status !=
5268 __constant_cpu_to_le16(ATIO_CDB_VALID)) {
5269 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5270 "qla_target(%d): ATIO with error "
5271 "status %x received\n", vha->vp_idx,
5272 le16_to_cpu(atio->u.isp2x.status));
5276 rc = qlt_chk_qfull_thresh_hold(vha, atio);
5278 tgt->irq_cmd_count--;
5282 rc = qlt_handle_cmd_for_atio(vha, atio);
5283 if (unlikely(rc != 0)) {
5285 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5286 qlt_send_busy(vha, atio, 0);
5288 qlt_send_term_exchange(vha, NULL, atio, 1);
5291 if (tgt->tgt_stop) {
5292 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5293 "qla_target: Unable to send "
5294 "command to target, sending TERM "
5295 "EXCHANGE for rsp\n");
5296 qlt_send_term_exchange(vha, NULL,
5299 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5300 "qla_target(%d): Unable to send "
5301 "command to target, sending BUSY "
5302 "status\n", vha->vp_idx);
5303 qlt_send_busy(vha, atio, 0);
5310 case CONTINUE_TGT_IO_TYPE:
5312 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5313 qlt_do_ctio_completion(vha, entry->handle,
5314 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5321 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5322 qlt_do_ctio_completion(vha, entry->handle,
5323 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5328 case IMMED_NOTIFY_TYPE:
5329 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5330 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5333 case NOTIFY_ACK_TYPE:
5334 if (tgt->notify_ack_expected > 0) {
5335 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5336 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5337 "NOTIFY_ACK seq %08x status %x\n",
5338 le16_to_cpu(entry->u.isp2x.seq_id),
5339 le16_to_cpu(entry->u.isp2x.status));
5340 tgt->notify_ack_expected--;
5341 if (entry->u.isp2x.status !=
5342 __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5343 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5344 "qla_target(%d): NOTIFY_ACK "
5345 "failed %x\n", vha->vp_idx,
5346 le16_to_cpu(entry->u.isp2x.status));
5349 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5350 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5355 case ABTS_RECV_24XX:
5356 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5357 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5358 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5361 case ABTS_RESP_24XX:
5362 if (tgt->abts_resp_expected > 0) {
5363 struct abts_resp_from_24xx_fw *entry =
5364 (struct abts_resp_from_24xx_fw *)pkt;
5365 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5366 "ABTS_RESP_24XX: compl_status %x\n",
5367 entry->compl_status);
5368 tgt->abts_resp_expected--;
5369 if (le16_to_cpu(entry->compl_status) !=
5370 ABTS_RESP_COMPL_SUCCESS) {
5371 if ((entry->error_subcode1 == 0x1E) &&
5372 (entry->error_subcode2 == 0)) {
5374 * We've got a race here: the aborted
5375 * exchange was not terminated, i.e. the
5376 * response for the aborted command was
5377 * sent after the abort request was
5378 * received but before it was processed.
5379 * Unfortunately, the firmware has a
5380 * silly requirement that all aborted
5381 * exchanges must be explicitly
5382 * terminated, otherwise it refuses to
5383 * send responses for the abort
5384 * requests. So, we have to
5385 * (re)terminate the exchange and retry
5386 * the abort response.
5388 qlt_24xx_retry_term_exchange(vha,
5391 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5392 "qla_target(%d): ABTS_RESP_24XX "
5393 "failed %x (subcode %x:%x)",
5394 vha->vp_idx, entry->compl_status,
5395 entry->error_subcode1,
5396 entry->error_subcode2);
5399 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5400 "qla_target(%d): Unexpected ABTS_RESP_24XX "
5401 "received\n", vha->vp_idx);
5406 ql_dbg(ql_dbg_tgt, vha, 0xe065,
5407 "qla_target(%d): Received unknown response pkt "
5408 "type %x\n", vha->vp_idx, pkt->entry_type);
5412 tgt->irq_cmd_count--;
5416 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5418 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5421 struct qla_hw_data *ha = vha->hw;
5422 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5425 if (!ha->tgt.tgt_ops)
5428 if (unlikely(tgt == NULL)) {
5429 ql_dbg(ql_dbg_tgt, vha, 0xe03a,
5430 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
5434 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5438 * In tgt_stop mode we should also allow all requests to pass.
5439 * Otherwise, some commands can get stuck.
5442 tgt->irq_cmd_count++;
5445 case MBA_RESET: /* Reset */
5446 case MBA_SYSTEM_ERR: /* System Error */
5447 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
5448 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
5449 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5450 "qla_target(%d): System error async event %#x "
5451 "occurred", vha->vp_idx, code);
5453 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
5454 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5459 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5460 "qla_target(%d): Async LOOP_UP occurred "
5461 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5462 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5463 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5464 if (tgt->link_reinit_iocb_pending) {
5465 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
5467 tgt->link_reinit_iocb_pending = 0;
5472 case MBA_LIP_OCCURRED:
5475 case MBA_RSCN_UPDATE:
5476 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5477 "qla_target(%d): Async event %#x occurred "
5478 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5479 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5480 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5483 case MBA_PORT_UPDATE:
5484 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5485 "qla_target(%d): Port update async event %#x "
5486 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5487 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5488 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5489 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5491 login_code = le16_to_cpu(mailbox[2]);
5492 if (login_code == 0x4)
5493 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5494 "Async MB 2: Got PLOGI Complete\n");
5495 else if (login_code == 0x7)
5496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
5497 "Async MB 2: Port Logged Out\n");
5504 tgt->irq_cmd_count--;
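/*
 * Allocate a temporary fc_port for 'loop_id' and fill it from the firmware
 * via the GET PORT DATABASE mailbox command.
 */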
5507 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5513 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
5515 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
5516 "qla_target(%d): Allocation of tmp FC port failed",
5521 fcport->loop_id = loop_id;
5523 rc = qla2x00_get_port_database(vha, fcport, 0);
5524 if (rc != QLA_SUCCESS) {
5525 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
5526 "qla_target(%d): Failed to retrieve fcport "
5527 "information -- get_port_database() returned %x "
5528 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
5536 /* Must be called under tgt_mutex */
5537 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
5540 struct qla_tgt_sess *sess = NULL;
5541 fc_port_t *fcport = NULL;
5542 int rc, global_resets;
5543 uint16_t loop_id = 0;
5547 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
5549 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
5551 if ((s_id[0] == 0xFF) &&
5552 (s_id[1] == 0xFC)) {
5554 * This is the Domain Controller, so it should be
5555 * OK to drop SCSI commands from it.
5557 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
5558 "Unable to find initiator with S_ID %x:%x:%x",
5559 s_id[0], s_id[1], s_id[2]);
5561 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
5562 "qla_target(%d): Unable to find "
5563 "initiator with S_ID %x:%x:%x",
5564 vha->vp_idx, s_id[0], s_id[1],
5569 fcport = qlt_get_port_database(vha, loop_id);
5573 if (global_resets !=
5574 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
5575 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
5576 "qla_target(%d): global reset during session discovery "
5577 "(counter was %d, new %d), retrying", vha->vp_idx,
5579 atomic_read(&vha->vha_tgt.
5580 qla_tgt->tgt_global_resets_count));
5584 sess = qlt_create_sess(vha, fcport, true);
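/*
 * Deferred ABTS handling (QLA_TGT_SESS_WORK_ABORT): find the session for
 * the frame's S_ID, creating a local one if necessary, and re-run
 * __qlt_24xx_handle_abts(); if that is not possible the ABTS is rejected
 * with FCP_TMF_REJECTED.
 */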
5590 static void qlt_abort_work(struct qla_tgt *tgt,
5591 struct qla_tgt_sess_work_param *prm)
5593 struct scsi_qla_host *vha = tgt->vha;
5594 struct qla_hw_data *ha = vha->hw;
5595 struct qla_tgt_sess *sess = NULL;
5596 unsigned long flags;
5601 spin_lock_irqsave(&ha->hardware_lock, flags);
5606 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
5607 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
5608 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
5610 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5611 (unsigned char *)&be_s_id);
5613 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5615 mutex_lock(&vha->vha_tgt.tgt_mutex);
5616 sess = qlt_make_local_sess(vha, s_id);
5617 /* sess has got an extra creation ref */
5618 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5620 spin_lock_irqsave(&ha->hardware_lock, flags);
5624 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5629 kref_get(&sess->se_sess->sess_kref);
5635 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5639 ha->tgt.tgt_ops->put_sess(sess);
5640 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5644 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5646 ha->tgt.tgt_ops->put_sess(sess);
5647 spin_unlock_irqrestore(&ha->hardware_lock, flags);
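/*
 * Deferred task management handling (QLA_TGT_SESS_WORK_TM): resolve the
 * session for the ATIO's S_ID (creating a local one if necessary), unpack
 * the LUN and TM flags and issue the TMF; on failure the exchange is
 * terminated.
 */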
5650 static void qlt_tmr_work(struct qla_tgt *tgt,
5651 struct qla_tgt_sess_work_param *prm)
5653 struct atio_from_isp *a = &prm->tm_iocb2;
5654 struct scsi_qla_host *vha = tgt->vha;
5655 struct qla_hw_data *ha = vha->hw;
5656 struct qla_tgt_sess *sess = NULL;
5657 unsigned long flags;
5658 uint8_t *s_id = NULL; /* to hide compiler warnings */
5660 uint32_t lun, unpacked_lun;
5664 spin_lock_irqsave(&ha->hardware_lock, flags);
5669 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
5670 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5672 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5674 mutex_lock(&vha->vha_tgt.tgt_mutex);
5675 sess = qlt_make_local_sess(vha, s_id);
5676 /* sess has got an extra creation ref */
5677 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5679 spin_lock_irqsave(&ha->hardware_lock, flags);
5683 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5688 kref_get(&sess->se_sess->sess_kref);
5692 lun = a->u.isp24.fcp_cmnd.lun;
5693 lun_size = sizeof(lun);
5694 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
5695 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
5697 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
5701 ha->tgt.tgt_ops->put_sess(sess);
5702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5706 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
5708 ha->tgt.tgt_ops->put_sess(sess);
5709 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5712 static void qlt_sess_work_fn(struct work_struct *work)
5714 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
5715 struct scsi_qla_host *vha = tgt->vha;
5716 unsigned long flags;
5718 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
5720 spin_lock_irqsave(&tgt->sess_work_lock, flags);
5721 while (!list_empty(&tgt->sess_works_list)) {
5722 struct qla_tgt_sess_work_param *prm = list_entry(
5723 tgt->sess_works_list.next, typeof(*prm),
5724 sess_works_list_entry);
5727 * This work can be scheduled on several CPUs at a time, so we
5728 * must delete the entry to eliminate double processing
5730 list_del(&prm->sess_works_list_entry);
5732 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
5734 switch (prm->type) {
5735 case QLA_TGT_SESS_WORK_ABORT:
5736 qlt_abort_work(tgt, prm);
5738 case QLA_TGT_SESS_WORK_TM:
5739 qlt_tmr_work(tgt, prm);
5746 spin_lock_irqsave(&tgt->sess_work_lock, flags);
5750 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
5753 /* Must be called under tgt_host_action_mutex */
5754 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
5756 struct qla_tgt *tgt;
5758 if (!QLA_TGT_MODE_ENABLED())
5761 if (!IS_TGT_MODE_CAPABLE(ha)) {
5762 ql_log(ql_log_warn, base_vha, 0xe070,
5763 "This adapter does not support target mode.\n");
5767 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
5768 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
5770 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
5772 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
5774 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
5775 "Unable to allocate struct qla_tgt\n");
5779 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
5780 base_vha->host->hostt->supported_mode |= MODE_TARGET;
5783 tgt->vha = base_vha;
5784 init_waitqueue_head(&tgt->waitQ);
5785 INIT_LIST_HEAD(&tgt->sess_list);
5786 INIT_LIST_HEAD(&tgt->del_sess_list);
5787 INIT_DELAYED_WORK(&tgt->sess_del_work,
5788 (void (*)(struct work_struct *))qlt_del_sess_work_fn);
5789 spin_lock_init(&tgt->sess_work_lock);
5790 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
5791 INIT_LIST_HEAD(&tgt->sess_works_list);
5792 spin_lock_init(&tgt->srr_lock);
5793 INIT_LIST_HEAD(&tgt->srr_ctio_list);
5794 INIT_LIST_HEAD(&tgt->srr_imm_list);
5795 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
5796 atomic_set(&tgt->tgt_global_resets_count, 0);
5798 base_vha->vha_tgt.qla_tgt = tgt;
5800 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
5801 "qla_target(%d): using 64 Bit PCI addressing",
5803 tgt->tgt_enable_64bit_addr = 1;
5805 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
5806 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
5807 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
5809 if (base_vha->fc_vport)
5812 mutex_lock(&qla_tgt_mutex);
5813 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
5814 mutex_unlock(&qla_tgt_mutex);
5819 /* Must be called under tgt_host_action_mutex */
5820 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
5822 if (!vha->vha_tgt.qla_tgt)
5825 if (vha->fc_vport) {
5826 qlt_release(vha->vha_tgt.qla_tgt);
5830 /* free left over qfull cmds */
5831 qlt_init_term_exchange(vha);
5833 mutex_lock(&qla_tgt_mutex);
5834 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
5835 mutex_unlock(&qla_tgt_mutex);
5837 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
5839 qlt_release(vha->vha_tgt.qla_tgt);
5844 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
5849 pr_debug("qla2xxx HW vha->node_name: ");
5850 for (i = 0; i < WWN_SIZE; i++)
5851 pr_debug("%02x ", vha->node_name[i]);
5853 pr_debug("qla2xxx HW vha->port_name: ");
5854 for (i = 0; i < WWN_SIZE; i++)
5855 pr_debug("%02x ", vha->port_name[i]);
5858 pr_debug("qla2xxx passed configfs WWPN: ");
5859 put_unaligned_be64(wwpn, b);
5860 for (i = 0; i < WWN_SIZE; i++)
5861 pr_debug("%02x ", b[i]);
5866 * qla_tgt_lport_register - register lport with external module
5868 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
5869 * @wwpn: Passed FC target WWPN
5870 * @callback: lport initialization callback for tcm_qla2xxx code
5871 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
5873 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
5874 u64 npiv_wwpn, u64 npiv_wwnn,
5875 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
5877 struct qla_tgt *tgt;
5878 struct scsi_qla_host *vha;
5879 struct qla_hw_data *ha;
5880 struct Scsi_Host *host;
5881 unsigned long flags;
5885 mutex_lock(&qla_tgt_mutex);
5886 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
5894 if (!(host->hostt->supported_mode & MODE_TARGET))
5897 spin_lock_irqsave(&ha->hardware_lock, flags);
5898 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
5899 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
5901 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5904 if (tgt->tgt_stop) {
5905 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
5907 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5912 if (!scsi_host_get(host)) {
5913 ql_dbg(ql_dbg_tgt, vha, 0xe068,
5914 "Unable to scsi_host_get() for"
5915 " qla2xxx scsi_host\n");
5918 qlt_lport_dump(vha, phys_wwpn, b);
5920 if (memcmp(vha->port_name, b, WWN_SIZE)) {
5921 scsi_host_put(host);
5924 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
5926 scsi_host_put(host);
5928 mutex_unlock(&qla_tgt_mutex);
5931 mutex_unlock(&qla_tgt_mutex);
5935 EXPORT_SYMBOL(qlt_lport_register);
5938 * qla_tgt_lport_deregister - Deregister lport
5940 * @vha: Registered scsi_qla_host pointer
5942 void qlt_lport_deregister(struct scsi_qla_host *vha)
5944 struct qla_hw_data *ha = vha->hw;
5945 struct Scsi_Host *sh = vha->host;
5947 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
5949 vha->vha_tgt.target_lport_ptr = NULL;
5950 ha->tgt.tgt_ops = NULL;
5952 * Release the Scsi_Host reference for the underlying qla2xxx host
5956 EXPORT_SYMBOL(qlt_lport_deregister);
5958 /* Must be called under HW lock */
5959 static void qlt_set_mode(struct scsi_qla_host *vha)
5961 struct qla_hw_data *ha = vha->hw;
5963 switch (ql2x_ini_mode) {
5964 case QLA2XXX_INI_MODE_DISABLED:
5965 case QLA2XXX_INI_MODE_EXCLUSIVE:
5966 vha->host->active_mode = MODE_TARGET;
5968 case QLA2XXX_INI_MODE_ENABLED:
5969 vha->host->active_mode |= MODE_TARGET;
5975 if (ha->tgt.ini_mode_force_reverse)
5976 qla_reverse_ini_mode(vha);
5979 /* Must be called under HW lock */
5980 static void qlt_clear_mode(struct scsi_qla_host *vha)
5982 struct qla_hw_data *ha = vha->hw;
5984 switch (ql2x_ini_mode) {
5985 case QLA2XXX_INI_MODE_DISABLED:
5986 vha->host->active_mode = MODE_UNKNOWN;
5988 case QLA2XXX_INI_MODE_EXCLUSIVE:
5989 vha->host->active_mode = MODE_INITIATOR;
5991 case QLA2XXX_INI_MODE_ENABLED:
5992 vha->host->active_mode &= ~MODE_TARGET;
5998 if (ha->tgt.ini_mode_force_reverse)
5999 qla_reverse_ini_mode(vha);
6003 * qla_tgt_enable_vha - NO LOCK HELD
6005 * host_reset, bring up w/ Target Mode Enabled
6008 qlt_enable_vha(struct scsi_qla_host *vha)
6010 struct qla_hw_data *ha = vha->hw;
6011 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6012 unsigned long flags;
6013 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6016 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6017 "Unable to locate qla_tgt pointer from"
6018 " struct qla_hw_data\n");
6023 spin_lock_irqsave(&ha->hardware_lock, flags);
6024 tgt->tgt_stopped = 0;
6026 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6029 qla24xx_disable_vp(vha);
6030 qla24xx_enable_vp(vha);
6032 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6033 qla2xxx_wake_dpc(base_vha);
6034 qla2x00_wait_for_hba_online(base_vha);
6037 EXPORT_SYMBOL(qlt_enable_vha);
6040 * qla_tgt_disable_vha - NO LOCK HELD
6042 * Disable Target Mode and reset the adapter
6044 static void qlt_disable_vha(struct scsi_qla_host *vha)
6046 struct qla_hw_data *ha = vha->hw;
6047 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6048 unsigned long flags;
6051 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6052 "Unable to locate qla_tgt pointer from"
6053 " struct qla_hw_data\n");
6058 spin_lock_irqsave(&ha->hardware_lock, flags);
6059 qlt_clear_mode(vha);
6060 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6062 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6063 qla2xxx_wake_dpc(vha);
6064 qla2x00_wait_for_hba_online(vha);
6068 * Called from qla_init.c:qla24xx_vport_create() context to set up
6069 * the target mode specific struct scsi_qla_host and struct qla_hw_data
6073 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6075 if (!qla_tgt_mode_enabled(vha))
6078 vha->vha_tgt.qla_tgt = NULL;
6080 mutex_init(&vha->vha_tgt.tgt_mutex);
6081 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6083 qlt_clear_mode(vha);
6086 * NOTE: Currently the value is kept the same for <24xx and
6087 * >=24xx ISPs. If it is necessary to change it,
6088 * the check should be added for specific ISPs,
6089 * assigning the value appropriately.
6091 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6093 qlt_add_target(ha, vha);
6097 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
6100 * FC-4 Feature bit 0 indicates target functionality to the name server.
6102 if (qla_tgt_mode_enabled(vha)) {
6103 if (qla_ini_mode_enabled(vha))
6104 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
6106 ct_req->req.rff_id.fc4_feature = BIT_0;
6107 } else if (qla_ini_mode_enabled(vha)) {
6108 ct_req->req.rff_id.fc4_feature = BIT_1;
6113 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6116 * Beginning of ATIO ring has initialization control block already built
6117 * by nvram config routine.
6119 * Returns 0 on success.
6122 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6124 struct qla_hw_data *ha = vha->hw;
6126 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6128 if (!qla_tgt_mode_enabled(vha))
6131 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6132 pkt->u.raw.signature = ATIO_PROCESSED;
6139 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6140 * @ha: SCSI driver HA context
6143 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
6145 struct qla_hw_data *ha = vha->hw;
6146 struct atio_from_isp *pkt;
6149 if (!vha->flags.online)
6152 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
6153 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6154 cnt = pkt->u.raw.entry_count;
6156 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
6158 for (i = 0; i < cnt; i++) {
6159 ha->tgt.atio_ring_index++;
6160 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6161 ha->tgt.atio_ring_index = 0;
6162 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6164 ha->tgt.atio_ring_ptr++;
6166 pkt->u.raw.signature = ATIO_PROCESSED;
6167 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6172 /* Adjust ring index */
6173 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6174 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
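/*
 * Program the ATIO queue in/out registers and, on MSI-X capable ISPs,
 * advertise the dedicated ATIO vector to the firmware via the init
 * control block.
 */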
6178 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6180 struct qla_hw_data *ha = vha->hw;
6181 if (!QLA_TGT_MODE_ENABLED())
6184 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
6185 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
6186 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
6188 if (IS_ATIO_MSIX_CAPABLE(ha)) {
6189 struct qla_msix_entry *msix = &ha->msix_entries[2];
6190 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6192 icb->msix_atio = cpu_to_le16(msix->entry);
6193 ql_dbg(ql_dbg_init, vha, 0xf072,
6194 "Registering ICB vector 0x%x for atio que.\n",
6200 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6202 struct qla_hw_data *ha = vha->hw;
6204 if (qla_tgt_mode_enabled(vha)) {
6205 if (!ha->tgt.saved_set) {
6206 /* We save only once */
6207 ha->tgt.saved_exchange_count = nv->exchange_count;
6208 ha->tgt.saved_firmware_options_1 =
6209 nv->firmware_options_1;
6210 ha->tgt.saved_firmware_options_2 =
6211 nv->firmware_options_2;
6212 ha->tgt.saved_firmware_options_3 =
6213 nv->firmware_options_3;
6214 ha->tgt.saved_set = 1;
6217 nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
6219 /* Enable target mode */
6220 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
6222 /* Disable ini mode, if requested */
6223 if (!qla_ini_mode_enabled(vha))
6224 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
6226 /* Disable Full Login after LIP */
6227 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
6228 /* Enable initial LIP */
6229 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
6230 if (ql2xtgt_tape_enable)
6231 /* Enable FC Tape support */
6232 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6234 /* Disable FC Tape support */
6235 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6237 /* Disable Full Login after LIP */
6238 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
6239 /* Enable target PRLI control */
6240 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
6242 if (ha->tgt.saved_set) {
6243 nv->exchange_count = ha->tgt.saved_exchange_count;
6244 nv->firmware_options_1 =
6245 ha->tgt.saved_firmware_options_1;
6246 nv->firmware_options_2 =
6247 ha->tgt.saved_firmware_options_2;
6248 nv->firmware_options_3 =
6249 ha->tgt.saved_firmware_options_3;
6254 /* out-of-order frames reassembly */
6255 nv->firmware_options_3 |= BIT_6|BIT_9;
6257 if (ha->tgt.enable_class_2) {
6258 if (vha->flags.init_done)
6259 fc_host_supported_classes(vha->host) =
6260 FC_COS_CLASS2 | FC_COS_CLASS3;
6262 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
6264 if (vha->flags.init_done)
6265 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6267 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
6272 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6273 struct init_cb_24xx *icb)
6275 struct qla_hw_data *ha = vha->hw;
6277 if (ha->tgt.node_name_set) {
6278 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6279 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
6284 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6286 struct qla_hw_data *ha = vha->hw;
6288 if (!QLA_TGT_MODE_ENABLED())
6291 if (qla_tgt_mode_enabled(vha)) {
6292 if (!ha->tgt.saved_set) {
6293 /* We save only once */
6294 ha->tgt.saved_exchange_count = nv->exchange_count;
6295 ha->tgt.saved_firmware_options_1 =
6296 nv->firmware_options_1;
6297 ha->tgt.saved_firmware_options_2 =
6298 nv->firmware_options_2;
6299 ha->tgt.saved_firmware_options_3 =
6300 nv->firmware_options_3;
6301 ha->tgt.saved_set = 1;
6304 nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
6306 /* Enable target mode */
6307 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
6309 /* Disable ini mode, if requested */
6310 if (!qla_ini_mode_enabled(vha))
6311 nv->firmware_options_1 |=
6312 __constant_cpu_to_le32(BIT_5);
6314 /* Disable Full Login after LIP */
6315 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
6316 /* Enable initial LIP */
6317 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
6318 if (ql2xtgt_tape_enable)
6319 /* Enable FC tape support */
6320 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6322 /* Disable FC tape support */
6323 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6325 /* Disable Full Login after LIP */
6326 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
6327 /* Enable target PRLI control */
6328 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
6330 if (ha->tgt.saved_set) {
6331 nv->exchange_count = ha->tgt.saved_exchange_count;
6332 nv->firmware_options_1 =
6333 ha->tgt.saved_firmware_options_1;
6334 nv->firmware_options_2 =
6335 ha->tgt.saved_firmware_options_2;
6336 nv->firmware_options_3 =
6337 ha->tgt.saved_firmware_options_3;
6342 /* out-of-order frames reassembly */
6343 nv->firmware_options_3 |= BIT_6|BIT_9;
6345 if (ha->tgt.enable_class_2) {
6346 if (vha->flags.init_done)
6347 fc_host_supported_classes(vha->host) =
6348 FC_COS_CLASS2 | FC_COS_CLASS3;
6350 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
6352 if (vha->flags.init_done)
6353 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6355 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
6360 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
6361 struct init_cb_81xx *icb)
6363 struct qla_hw_data *ha = vha->hw;
6365 if (!QLA_TGT_MODE_ENABLED())
6368 if (ha->tgt.node_name_set) {
6369 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6370 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
6375 qlt_83xx_iospace_config(struct qla_hw_data *ha)
6377 if (!QLA_TGT_MODE_ENABLED())
6380 ha->msix_count += 1; /* For ATIO Q */
6384 qlt_24xx_process_response_error(struct scsi_qla_host *vha,
6385 struct sts_entry_24xx *pkt)
6387 switch (pkt->entry_type) {
6388 case ABTS_RECV_24XX:
6389 case ABTS_RESP_24XX:
6391 case NOTIFY_ACK_TYPE:
6400 qlt_modify_vp_config(struct scsi_qla_host *vha,
6401 struct vp_config_entry_24xx *vpmod)
6403 if (qla_tgt_mode_enabled(vha))
6404 vpmod->options_idx1 &= ~BIT_5;
6405 /* Disable ini mode, if requested */
6406 if (!qla_ini_mode_enabled(vha))
6407 vpmod->options_idx1 &= ~BIT_4;
6411 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
6413 if (!QLA_TGT_MODE_ENABLED())
6416 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6417 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
6418 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
6420 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
6421 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
6424 mutex_init(&base_vha->vha_tgt.tgt_mutex);
6425 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
6426 qlt_clear_mode(base_vha);
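/*
 * MSI-X handler for the dedicated ATIO queue vector: process the ATIO ring
 * and then the response queue under hardware_lock.
 */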
6430 qla83xx_msix_atio_q(int irq, void *dev_id)
6432 struct rsp_que *rsp;
6433 scsi_qla_host_t *vha;
6434 struct qla_hw_data *ha;
6435 unsigned long flags;
6437 rsp = (struct rsp_que *) dev_id;
6439 vha = pci_get_drvdata(ha->pdev);
6441 spin_lock_irqsave(&ha->hardware_lock, flags);
6443 qlt_24xx_process_atio_queue(vha);
6444 qla24xx_process_response_queue(vha, rsp);
6446 spin_unlock_irqrestore(&ha->hardware_lock, flags);
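/*
 * Allocate target-mode resources: the tgt_vp_map lookup table and the
 * DMA-coherent ATIO ring.
 */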
6452 qlt_mem_alloc(struct qla_hw_data *ha)
6454 if (!QLA_TGT_MODE_ENABLED())
6457 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
6458 MAX_MULTI_ID_FABRIC, GFP_KERNEL);
6459 if (!ha->tgt.tgt_vp_map)
6462 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
6463 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
6464 &ha->tgt.atio_dma, GFP_KERNEL);
6465 if (!ha->tgt.atio_ring) {
6466 kfree(ha->tgt.tgt_vp_map);
6473 qlt_mem_free(struct qla_hw_data *ha)
6475 if (!QLA_TGT_MODE_ENABLED())
6478 if (ha->tgt.atio_ring) {
6479 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
6480 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
6483 kfree(ha->tgt.tgt_vp_map);
6486 /* vport_slock to be held by the caller */
6488 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
6490 if (!QLA_TGT_MODE_ENABLED())
6495 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
6498 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
6501 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
6504 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
6509 static int __init qlt_parse_ini_mode(void)
6511 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
6512 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
6513 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
6514 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
6515 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
6516 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
6523 int __init qlt_init(void)
6527 if (!qlt_parse_ini_mode()) {
6528 ql_log(ql_log_fatal, NULL, 0xe06b,
6529 "qlt_parse_ini_mode() failed\n");
6533 if (!QLA_TGT_MODE_ENABLED())
6536 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
6537 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
6538 qla_tgt_mgmt_cmd), 0, NULL);
6539 if (!qla_tgt_mgmt_cmd_cachep) {
6540 ql_log(ql_log_fatal, NULL, 0xe06d,
6541 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
6545 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
6546 mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
6547 if (!qla_tgt_mgmt_cmd_mempool) {
6548 ql_log(ql_log_fatal, NULL, 0xe06e,
6549 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
6551 goto out_mgmt_cmd_cachep;
6554 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
6556 ql_log(ql_log_fatal, NULL, 0xe06f,
6557 "alloc_workqueue for qla_tgt_wq failed\n");
6559 goto out_cmd_mempool;
6562 * Return 1 to signal that initiator-mode is being disabled
6564 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
6567 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
6568 out_mgmt_cmd_cachep:
6569 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
6575 if (!QLA_TGT_MODE_ENABLED())
6578 destroy_workqueue(qla_tgt_wq);
6579 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
6580 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);