2 * Copyright (C) 2005 - 2013 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
12 * Contact Information:
13 * linux-drivers@emulex.com
17 * Costa Mesa, CA 92626
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29 #include <linux/iscsi_boot_sysfs.h>
30 #include <linux/module.h>
31 #include <linux/bsg-lib.h>
33 #include <scsi/libiscsi.h>
34 #include <scsi/scsi_bsg_iscsi.h>
35 #include <scsi/scsi_netlink.h>
36 #include <scsi/scsi_transport_iscsi.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_cmnd.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi.h>
/* Driver tunables exposed as module parameters. */
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

/*
 * NOTE(review): MODULE_DEVICE_TABLE(pci, ...) is declared again after the
 * PCI id table definition later in this file -- duplicate declaration.
 */
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
/*
 * NOTE(review): be_iopoll_budget and enable_msix are declared unsigned int
 * but registered with the "int" param type -- confirm this is intentional.
 */
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");
/*
 * beiscsi_disp_param(_name): generates the sysfs "show" handler
 * beiscsi_<name>_disp() that prints the current phba->attr_<name> value.
 */
#define beiscsi_disp_param(_name)\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf) \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = 0; \
	param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
/*
 * beiscsi_change_param(_name, ...): generates beiscsi_<name>_change(),
 * which validates @val against [_minval, _maxval], logs the transition and
 * stores it in phba->attr_<name>; out-of-range values are logged and
 * rejected.  (The _defaval parameter is unused by the generated body.)
 */
#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
/*
 * beiscsi_store_param(_name): generates the sysfs "store" handler
 * beiscsi_<name>_store() -- parses the user buffer as an integer and
 * delegates validation/update to beiscsi_<name>_change().
 */
#define beiscsi_store_param(_name)  \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
	if (sscanf(buf, "%i", &param_val) != 1)\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
/*
 * beiscsi_init_param(_name, ...): generates beiscsi_<name>_init(), used at
 * probe time to seed phba->attr_<name>; an out-of-range module-param value
 * is logged and replaced with _defval.
 */
#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
/*
 * BEISCSI_RW_ATTR(): one-stop macro that declares a module parameter plus
 * the disp/change/store/init handlers (via the macros above) and the
 * matching read-write device attribute.
 */
#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)
 * When a new log level is added, update the
 * MAX allowed value for log_enable
/* log_enable is a bit mask; keep the 0xFF max in sync with the log levels. */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events : 0x01\n"
		"\t\t\t\tMailbox Events : 0x02\n"
		"\t\t\t\tMiscellaneous Events : 0x04\n"
		"\t\t\t\tError Handling : 0x08\n"
		"\t\t\t\tIO Path Events : 0x10\n"
		"\t\t\t\tConfiguration Path : 0x20\n");

/* Read-only informational attributes; handlers are defined elsewhere. */
DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
/* Attribute list wired into beiscsi_sht.shost_attrs below. */
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_cid_count,
/*
 * Human-readable names for completion-queue-entry codes, indexed by the
 * CQE code reported by firmware; used when logging CQ events.
 */
static char const *cqe_desc[] = {
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
/* Cap each DMA segment for this device's request queue at 64 KiB. */
static int beiscsi_slave_configure(struct scsi_device *sdev)
	blk_queue_max_segment_size(sdev->request_queue, 65536);
/*
 * beiscsi_eh_abort - SCSI midlayer abort handler.
 * Builds a one-entry invalidate table for the aborted task's ICD, asks
 * firmware to invalidate it via mgmt_invalidate_icds() using a
 * DMA-coherent payload buffer, waits for the MCC completion, then hands
 * off to libiscsi's iscsi_eh_abort() for the protocol-level abort.
 */
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	/* NOTE(review): assignment of num_invalidate is not visible here --
	 * presumably set to 1 for the single entry; verify. */
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* Task may already have completed or been freed; check under lock. */
	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		spin_unlock_bh(&session->lock);

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	/* Non-embedded mailbox payload must be DMA-coherent. */
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for"
			    "mgmt_invalidate_icds\n");
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

	/* Wait for the MCC completion before releasing the DMA buffer. */
	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
282 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
284 struct iscsi_task *abrt_task;
285 struct beiscsi_io_task *abrt_io_task;
286 struct iscsi_conn *conn;
287 struct beiscsi_conn *beiscsi_conn;
288 struct beiscsi_hba *phba;
289 struct iscsi_session *session;
290 struct iscsi_cls_session *cls_session;
291 struct invalidate_command_table *inv_tbl;
292 struct be_dma_mem nonemb_cmd;
293 unsigned int cid, tag, i, num_invalidate;
295 /* invalidate iocbs */
296 cls_session = starget_to_session(scsi_target(sc->device));
297 session = cls_session->dd_data;
298 spin_lock_bh(&session->lock);
299 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
300 spin_unlock_bh(&session->lock);
303 conn = session->leadconn;
304 beiscsi_conn = conn->dd_data;
305 phba = beiscsi_conn->phba;
306 cid = beiscsi_conn->beiscsi_conn_cid;
307 inv_tbl = phba->inv_tbl;
308 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
310 for (i = 0; i < conn->session->cmds_max; i++) {
311 abrt_task = conn->session->cmds[i];
312 abrt_io_task = abrt_task->dd_data;
313 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
316 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
320 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
324 spin_unlock_bh(&session->lock);
325 inv_tbl = phba->inv_tbl;
327 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
328 sizeof(struct invalidate_commands_params_in),
330 if (nonemb_cmd.va == NULL) {
331 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
332 "BM_%d : Failed to allocate memory for"
333 "mgmt_invalidate_icds\n");
336 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
337 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
338 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
341 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
342 "BM_%d : mgmt_invalidate_icds could not be"
344 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
345 nonemb_cmd.va, nonemb_cmd.dma);
349 beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
350 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
351 nonemb_cmd.va, nonemb_cmd.dma);
352 return iscsi_eh_device_reset(sc);
/*
 * beiscsi_show_boot_tgt_info - iscsi_boot_sysfs "show" callback for the
 * boot target kobject; formats the requested target attribute into the
 * sysfs buffer.
 * NOTE(review): later cases write through "str" whose declaration is not
 * visible here -- presumably "char *str = buf;"; verify.
 */
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
	case ISCSI_BOOT_TGT_IP_ADDR:
		/* ip_type 0x1 is IPv4; anything else is formatted as IPv6. */
		if (boot_conn->dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
			rc = sprintf(str, "%pI6\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
/* iscsi_boot_sysfs "show" callback for initiator attributes. */
static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
	struct beiscsi_hba *phba = data;
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
/* iscsi_boot_sysfs "show" callback for the boot ethernet attributes. */
static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
	struct beiscsi_hba *phba = data;
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(str, phba);
/* Visibility mask for boot-target sysfs attributes (read-only for these). */
static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
/* Visibility mask for the boot-initiator sysfs attribute. */
static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
	case ISCSI_BOOT_INI_INITIATOR_NAME:
/* Visibility mask for the boot-ethernet sysfs attributes. */
static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
/*------------------- PCI Driver operations and data ----------------- */
/* PCI ids this driver binds to (BE2/BE3 and Lancer/Skyhawk variants). */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
/* SCSI host template; eh handlers above, generic paths from libiscsi. */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,

/* Registered in module init; referenced by each allocated Scsi_Host. */
static struct scsi_transport_template *beiscsi_scsi_transport;
/*
 * beiscsi_hba_alloc - allocate and register a Scsi_Host for @pcidev.
 * The beiscsi_hba lives in the host's private data; takes a reference on
 * the pci_dev and stores the hba as its drvdata.  The tail of the function
 * (error unwinding via pci_dev_put/iscsi_host_free) handles add failure.
 */
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;	/* "no interface" sentinel */

	if (iscsi_host_add(shost, &phba->pcidev->dev))
	/* error unwind: drop pci ref and free the host */
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
/* Unmap the CSR, doorbell and PCI-config BAR mappings made at probe. */
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
	iounmap(phba->csr_va);
	iounmap(phba->db_va);
	iounmap(phba->pci_va);
/*
 * beiscsi_map_pci_bars - ioremap the controller's BARs.
 * BAR 2 = CSR space, BAR 4 = doorbells (first 128 KiB), and a
 * generation-dependent BAR for PCI config shadow (pcicfg_reg, chosen from
 * phba->generation).  On failure, previously mapped BARs are undone via
 * beiscsi_unmap_pci_function().
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	phba->ctrl.csr = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	phba->ctrl.db = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));
	phba->ctrl.pcicfg = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);

	/* error path: tear down whatever was mapped so far */
	beiscsi_unmap_pci_function(phba);
/*
 * beiscsi_enable_pci - enable the device, set bus mastering and pick a
 * 64-bit coherent DMA mask, falling back to 32-bit.
 * NOTE(review): only the *coherent* mask is set here; the streaming mask
 * is not visible in this function -- confirm it is set elsewhere.
 */
static int beiscsi_enable_pci(struct pci_dev *pcidev)
	ret = pci_enable_device(pcidev);
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
/*
 * be_ctrl_init - map BARs and set up the MCC mailbox.
 * Allocates a DMA-coherent mailbox buffer with 16 bytes of slack so the
 * working pointer/dma address can be aligned to 16, zeroes it, and
 * initializes the mailbox/MCC spinlocks.
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;

	status = beiscsi_map_pci_bars(phba, pdev);
	/* +16 gives room to round va/dma up to a 16-byte boundary */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr device priv structure
static void beiscsi_get_params(struct beiscsi_hba *phba)
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;

	/* Sum connection ids across both ULPs. */
	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	/* ICD count is taken from the first supported ULP. */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			total_icd_count = phba->fw_config.
					  iscsi_icd_count[ulp_num];

	/* Reserve ICDs for TMFs and NOP-Outs; the rest serve regular IO. */
	phba->params.ios_per_ctrl = (total_icd_count -
				    BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.asyncpdus_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
/*
 * hwi_ring_eq_db - ring an event-queue doorbell.
 * Packs ring id, rearm/clear-interrupt/event flags and the number of
 * consumed entries into one 32-bit doorbell write.
 */
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
	val |= id & DB_EQ_RING_ID_MASK;
		val |= 1 << DB_EQ_REARM_SHIFT;
		val |= 1 << DB_EQ_CLR_SHIFT;
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
 * be_isr_mcc - The isr routine of the driver.
 * @dev_id: Pointer to host adapter structure
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;

	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	num_eq_processed = 0;
	/* Drain valid EQEs; an EQE pointing at the MCC CQ defers MCC
	 * processing to the workqueue. */
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_mcc_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		/* Consume the entry so it is not seen again. */
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		eqe = queue_tail_node(eq);

	if (pbe_eq->todo_mcc_cq)
		queue_work(phba->wq, &pbe_eq->work_cqs);
	if (num_eq_processed)
		/* rearm with event set so the EQ keeps firing */
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
 * be_isr_msix - The isr routine of the driver.
 * @dev_id: Pointer to host adapter structure
static irqreturn_t be_isr_msix(int irq, void *dev_id)
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;

	eqe = queue_tail_node(eq);

	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		/* With blk-iopoll, schedule the poll handler to drain the CQ. */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			eqe = queue_tail_node(eq);
		/* Without blk-iopoll, defer CQ processing to the workqueue. */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			eqe = queue_tail_node(eq);

			queue_work(phba->wq, &pbe_eq->work_cqs);

	if (num_eq_processed)
		/* rearm=0: iopoll/worker will rearm when done */
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
 * be_isr - The isr routine of the driver.
 * @dev_id: Pointer to host adapter structure
static irqreturn_t be_isr(int irq, void *dev_id)
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;

	/* INTx: check this function's bit in the cause register first. */
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		/* Split EQEs between the MCC CQ (worker) and IO CQ (iopoll). */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				pbe_eq->todo_mcc_cq = true;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			eqe = queue_tail_node(eq);
		if (num_ioeq_processed || num_mcceq_processed) {
			if (pbe_eq->todo_mcc_cq)
				queue_work(phba->wq, &pbe_eq->work_cqs);

			/* Rearm only when no IO work was handed to iopoll;
			 * iopoll rearms itself on completion. */
			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					      (num_ioeq_processed +
					       num_mcceq_processed) , 1, 1);
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);
		/* Non-iopoll path: everything is deferred to the workqueue. */
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				pbe_eq->todo_mcc_cq = true;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				spin_lock_irqsave(&phba->isr_lock, flags);
				pbe_eq->todo_cq = true;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
			queue_work(phba->wq, &pbe_eq->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
/*
 * beiscsi_init_irqs - register interrupt handlers.
 * MSI-X mode: one be_isr_msix vector per CPU plus a final be_isr_mcc
 * vector for MCC events; otherwise a single shared INTx line with be_isr.
 * On any failure, previously requested vectors/names are unwound
 * (free_msix_irqs loop at the end).
 */
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
			if (!phba->msi_name[i]) {

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  &phwi_context->be_eq[i]);
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to"
					    "register msix for i = %d\n",
				kfree(phba->msi_name[i]);
		/* Last vector (index num_cpus) services the MCC queue. */
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    /* NOTE(review): "\\n" prints a literal
				     * backslash-n, not a newline. */
				    "Failed to register irq\\n");

	/* unwind already-registered MSI-X vectors */
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
/*
 * hwi_ring_cq_db - ring a completion-queue doorbell.
 * Packs ring id, rearm flag and number of consumed entries into one
 * 32-bit doorbell write.  (The event parameter is not used here.)
 */
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
	val |= id & DB_CQ_RING_ID_MASK;
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
/*
 * beiscsi_process_async_pdu - dispatch an unsolicited PDU to libiscsi.
 * For LOGIN/TEXT responses the ITT is rewritten to libiscsi's ITT for the
 * login task before the PDU is completed under the session lock.
 */
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
		 PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_ASYNC_EVENT:
	case ISCSI_OP_REJECT:
		/* A reject PDU payload is a fixed 48-byte BHS. */
		WARN_ON(!(buf_len == 48));
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		/* restore the ITT libiscsi assigned to the login task */
		login_hdr->itt = io_task->libiscsi_itt;
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unrecognized opcode 0x%x in async msg\n",
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
			     & PDUBASE_OPCODE_MASK));

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
/*
 * alloc_io_sgl_handle - pop an IO SGL handle from the circular free pool.
 * The pool slot is NULLed on allocation so double-free can be detected in
 * free_io_sgl_handle(); the alloc index wraps at ios_per_ctrl.
 */
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->
						     io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
			phba->io_sgl_alloc_index = 0;
			phba->io_sgl_alloc_index++;
/*
 * free_io_sgl_handle - return an IO SGL handle to the circular free pool.
 * A non-NULL slot at the free index indicates a double free (possible when
 * clean_task runs for a task that failed in xmit_task/alloc_pdu); it is
 * logged and the free is skipped.
 */
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_,io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
			    "value there=%p\n", phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base
			    [phba->io_sgl_free_index]);
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
		phba->io_sgl_free_index++;
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* Keep at least one spare handle so the next-index link below is
	 * always valid. */
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
				(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
			pwrb_context->alloc_index++;
		/* Chain this WRB to the next one in the ring. */
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
						pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	/* free_index wraps at wrbs_per_cxn, mirroring alloc_index */
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
		pwrb_context->free_index++;

	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
/*
 * alloc_mgmt_sgl_handle - pop an error-handling/mgmt SGL handle from its
 * circular pool.  The mgmt pool holds icds_per_ctrl - ios_per_ctrl handles,
 * so the alloc index wraps at that size.
 */
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
			phba->eh_sgl_alloc_index = 0;
			phba->eh_sgl_alloc_index++;
/*
 * free_mgmt_sgl_handle - return a mgmt SGL handle to its circular pool.
 * A non-NULL slot at the free index indicates a double free; it is logged
 * and the free is skipped (same pattern as free_io_sgl_handle()).
 */
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle,"
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL ,"
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
		phba->eh_sgl_free_index++;
/*
 * be_complete_io - translate a solicited IO completion CQE into SCSI
 * midlayer status: fills result/residual/sense from the CQE fields and
 * completes the task via iscsi_complete_scsi_task().
 */
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	/* window is inclusive, hence the -1 */
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	/* NOTE(review): scsi_dma_unmap() is also called unconditionally at
	 * the end of this function -- confirm the two calls are on disjoint
	 * paths (a goto/return is likely missing from this view). */
	if (io_task->scsi_cmnd)
		scsi_dma_unmap(io_task->scsi_cmnd);

	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;

	if (status == SAM_STAT_CHECK_CONDITION) {
		/* sense data is prefixed with a big-endian 16-bit length */
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
/*
 * be_complete_logout - build an iscsi_logout_rsp from the normalized CQE
 * and hand it to libiscsi via __iscsi_complete_pdu().
 *
 * Copies flags/response/exp_cmdsn/max_cmdsn from the CQE (CmdSN fields in
 * wire byte order), zeroes the data length, and restores the libiscsi ITT
 * that was saved at task setup.
 */
1323 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1324 struct iscsi_task *task,
1325 struct common_sol_cqe *csol_cqe)
1327 struct iscsi_logout_rsp *hdr;
1328 struct beiscsi_io_task *io_task = task->dd_data;
1329 struct iscsi_conn *conn = beiscsi_conn->conn;
1331 hdr = (struct iscsi_logout_rsp *)task->hdr;
1332 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
1335 hdr->flags = csol_cqe->i_flags;
1336 hdr->response = csol_cqe->i_resp;
/* libiscsi expects CmdSN window fields in big-endian wire order */
1337 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1338 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1339 csol_cqe->cmd_wnd - 1);
/* logout response carries no data segment */
1341 hdr->dlength[0] = 0;
1342 hdr->dlength[1] = 0;
1343 hdr->dlength[2] = 0;
1345 hdr->itt = io_task->libiscsi_itt;
1346 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
/*
 * be_complete_tmf - build an iscsi_tm_rsp (task management function
 * response) from the normalized CQE and complete it through libiscsi.
 * Mirrors be_complete_logout() but for ISCSI_OP_SCSI_TMFUNC_RSP.
 */
1350 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1351 struct iscsi_task *task,
1352 struct common_sol_cqe *csol_cqe)
1354 struct iscsi_tm_rsp *hdr;
1355 struct iscsi_conn *conn = beiscsi_conn->conn;
1356 struct beiscsi_io_task *io_task = task->dd_data;
1358 hdr = (struct iscsi_tm_rsp *)task->hdr;
1359 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
1360 hdr->flags = csol_cqe->i_flags;
1361 hdr->response = csol_cqe->i_resp;
/* CmdSN window in big-endian wire order, max = exp + window - 1 */
1362 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1363 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1364 csol_cqe->cmd_wnd - 1);
1366 hdr->itt = io_task->libiscsi_itt;
1367 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
/*
 * hwi_complete_drvr_msgs - complete a driver-message CQE.
 *
 * Extracts the WRB index and CID from the CQE (layout differs between
 * BE2/BE3-R and the v2 chips), looks up the WRB handle for that connection,
 * clears the hardware WRB and drops the task reference.
 */
1371 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1372 struct beiscsi_hba *phba, struct sol_cqe *psol)
1374 struct hwi_wrb_context *pwrb_context;
1375 struct wrb_handle *pwrb_handle = NULL;
1376 struct hwi_controller *phwi_ctrlr;
1377 struct iscsi_task *task;
1378 struct beiscsi_io_task *io_task;
1379 uint16_t wrb_index, cid, cri_index;
1381 phwi_ctrlr = phba->phwi_ctrlr;
/* CQE field layout is chip-generation dependent */
1382 if (is_chip_be2_be3r(phba)) {
1383 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1385 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1388 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1390 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1394 cri_index = BE_GET_CRI_FROM_CID(cid);
1395 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1396 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
1397 task = pwrb_handle->pio_handle;
1399 io_task = task->dd_data;
/* scrub the WRB before releasing the task reference */
1400 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
1401 iscsi_put_task(task);
/*
 * be_complete_nopin_resp - build an ISCSI_OP_NOOP_IN response from the
 * normalized CQE and complete it through libiscsi.
 */
1405 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1406 struct iscsi_task *task,
1407 struct common_sol_cqe *csol_cqe)
1409 struct iscsi_nopin *hdr;
1410 struct iscsi_conn *conn = beiscsi_conn->conn;
1411 struct beiscsi_io_task *io_task = task->dd_data;
1413 hdr = (struct iscsi_nopin *)task->hdr;
1414 hdr->flags = csol_cqe->i_flags;
/* CmdSN window in big-endian wire order, max = exp + window - 1 */
1415 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1416 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1417 csol_cqe->cmd_wnd - 1);
1419 hdr->opcode = ISCSI_OP_NOOP_IN;
1420 hdr->itt = io_task->libiscsi_itt;
1421 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
/*
 * adapter_get_sol_cqe - normalize a solicited CQE into struct
 * common_sol_cqe so the completion paths are chip-agnostic.
 *
 * BE2/BE3-R carries response/status/flags directly; the v2 layout gates
 * status/response on validity bits and synthesizes the iSCSI
 * underflow/overflow flags from dedicated CQE bits.
 * NOTE(review): second field-name argument of most AMAP_GET_BITS calls is
 * on an elided line in this listing.
 */
1424 static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1425 struct sol_cqe *psol,
1426 struct common_sol_cqe *csol_cqe)
1428 if (is_chip_be2_be3r(phba)) {
1429 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1430 i_exp_cmd_sn, psol);
1431 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1433 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1435 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1437 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1439 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1441 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1443 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1445 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1448 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1449 i_exp_cmd_sn, psol);
1450 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1452 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1454 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1456 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1458 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
/* v2: status/response fields only valid when the matching CQE bit is set */
1460 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1462 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1465 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
/* v2: rebuild the iSCSI residual flags from separate CQE bits */
1467 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1469 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
1471 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1473 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
/*
 * hwi_complete_cmd - dispatch a solicited completion to the appropriate
 * per-opcode handler.
 *
 * Normalizes the CQE via adapter_get_sol_cqe(), resolves the WRB handle
 * and its iscsi_task, then switches on the stored wrb_type under the
 * libiscsi session lock. The switch's case-body tails (break statements)
 * are elided in this listing.
 */
1478 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1479 struct beiscsi_hba *phba, struct sol_cqe *psol)
1481 struct hwi_wrb_context *pwrb_context;
1482 struct wrb_handle *pwrb_handle;
1483 struct iscsi_wrb *pwrb = NULL;
1484 struct hwi_controller *phwi_ctrlr;
1485 struct iscsi_task *task;
1487 struct iscsi_conn *conn = beiscsi_conn->conn;
1488 struct iscsi_session *session = conn->session;
1489 struct common_sol_cqe csol_cqe = {0};
1490 uint16_t cri_index = 0;
1492 phwi_ctrlr = phba->phwi_ctrlr;
1494 /* Copy the elements to a common structure */
1495 adapter_get_sol_cqe(phba, psol, &csol_cqe);
1497 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
1498 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1500 pwrb_handle = pwrb_context->pwrb_handle_basestd[
1501 csol_cqe.wrb_index];
1503 task = pwrb_handle->pio_handle;
1504 pwrb = pwrb_handle->pwrb;
1505 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
/* completion handlers run under the libiscsi session lock */
1507 spin_lock_bh(&session->lock);
1510 case HWH_TYPE_IO_RD:
/* a NOOP_OUT can complete on the I/O path; route it to the nopin handler */
1511 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1513 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
1515 be_complete_io(beiscsi_conn, task, &csol_cqe);
/* LOGOUT and TMF share one WRB type; split on the iSCSI opcode */
1518 case HWH_TYPE_LOGOUT:
1519 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1520 be_complete_logout(beiscsi_conn, task, &csol_cqe);
1522 be_complete_tmf(beiscsi_conn, task, &csol_cqe);
/* login completes on the unsolicited path, never here */
1525 case HWH_TYPE_LOGIN:
1526 beiscsi_log(phba, KERN_ERR,
1527 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1528 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
1529 " hwi_complete_cmd- Solicited path\n");
1533 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
1537 beiscsi_log(phba, KERN_WARNING,
1538 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1539 "BM_%d : In hwi_complete_cmd, unknown type = %d"
1540 "wrb_index 0x%x CID 0x%x\n", type,
1546 spin_unlock_bh(&session->lock);
/*
 * hwi_get_async_busy_list - select the per-entry busy list for either the
 * async header ring or the async data ring at the given write index.
 * The header-branch condition line is elided in this listing.
 */
1549 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1550 *pasync_ctx, unsigned int is_header,
1551 unsigned int host_write_ptr)
1554 return &pasync_ctx->async_entry[host_write_ptr].
1557 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
/*
 * hwi_get_async_handle - locate the async_pdu_handle that backs an
 * unsolicited-PDU CQE.
 *
 * Reads data placement length (dpl) and ring index from the chip-specific
 * CQE layout, reconstructs the buffer's physical base address (the low
 * 32 bits in the CQE include dpl, so it is subtracted back out), then
 * scans the matching busy list (header or data, per the CQE code) for the
 * handle whose DMA address matches. The found handle is tagged with the
 * connection CRI, header flag and payload length.
 * NOTE(review): the switch's case tails and the error-path return are on
 * elided lines.
 */
1560 static struct async_pdu_handle *
1561 hwi_get_async_handle(struct beiscsi_hba *phba,
1562 struct beiscsi_conn *beiscsi_conn,
1563 struct hwi_async_pdu_context *pasync_ctx,
1564 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1566 struct be_bus_address phys_addr;
1567 struct list_head *pbusy_list;
1568 struct async_pdu_handle *pasync_handle = NULL;
1569 unsigned char is_header = 0;
1570 unsigned int index, dpl;
1572 if (is_chip_be2_be3r(phba)) {
1573 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1575 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1578 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1580 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
/* the CQE's low address word has dpl folded in; subtract to get the base */
1584 phys_addr.u.a32.address_lo =
1585 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1586 db_addr_lo) / 32] - dpl);
1587 phys_addr.u.a32.address_hi =
1588 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1591 phys_addr.u.a64.address =
1592 *((unsigned long long *)(&phys_addr.u.a64.address));
/* header vs data notification decides which busy list to search */
1594 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1595 & PDUCQE_CODE_MASK) {
1596 case UNSOL_HDR_NOTIFY:
1599 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
1602 case UNSOL_DATA_NOTIFY:
1603 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
1608 beiscsi_log(phba, KERN_WARNING,
1609 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1610 "BM_%d : Unexpected code=%d\n",
1611 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1612 code) / 32] & PDUCQE_CODE_MASK);
1616 WARN_ON(list_empty(pbusy_list));
/* match by DMA address: the firmware tells us which buffer it filled */
1617 list_for_each_entry(pasync_handle, pbusy_list, link) {
1618 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
1622 WARN_ON(!pasync_handle);
1624 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
1625 beiscsi_conn->beiscsi_conn_cid);
1626 pasync_handle->is_header = is_header;
1627 pasync_handle->buffer_len = dpl;
1630 return pasync_handle;
/*
 * hwi_update_async_writables - walk the endpoint read pointer forward to
 * the CQE index, marking every busy entry passed over as consumed and
 * counting it as writable (re-postable).
 *
 * Operates on either the header or the data ring state depending on
 * is_header. An empty busy list mid-walk indicates a duplicate
 * notification from the firmware and is logged.
 * NOTE(review): the read-pointer increment, loop-exit handling and return
 * are on elided lines.
 */
1634 hwi_update_async_writables(struct beiscsi_hba *phba,
1635 struct hwi_async_pdu_context *pasync_ctx,
1636 unsigned int is_header, unsigned int cq_index)
1638 struct list_head *pbusy_list;
1639 struct async_pdu_handle *pasync_handle;
1640 unsigned int num_entries, writables = 0;
1641 unsigned int *pep_read_ptr, *pwritables;
1643 num_entries = pasync_ctx->num_entries;
/* pick header vs data ring bookkeeping */
1645 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1646 pwritables = &pasync_ctx->async_header.writables;
1648 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1649 pwritables = &pasync_ctx->async_data.writables;
/* advance (mod num_entries) until we reach the CQE's index */
1652 while ((*pep_read_ptr) != cq_index) {
1654 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1656 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1659 WARN_ON(list_empty(pbusy_list));
1661 if (!list_empty(pbusy_list)) {
1662 pasync_handle = list_entry(pbusy_list->next,
1663 struct async_pdu_handle,
1665 WARN_ON(!pasync_handle);
1666 pasync_handle->consumed = 1;
/* reaching here without consuming anything means a repeated CQ index */
1673 beiscsi_log(phba, KERN_ERR,
1674 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1675 "BM_%d : Duplicate notification received - index 0x%x!!\n",
1680 *pwritables = *pwritables + writables;
/*
 * hwi_free_async_msg - release all buffers queued on a connection's
 * async-PDU wait queue.
 *
 * Every handle on the per-CRI wait list goes back to its origin pool
 * (header free list or data free list), then the wait-queue accounting
 * for that CRI is reset.
 */
1684 static void hwi_free_async_msg(struct beiscsi_hba *phba,
1685 struct hwi_async_pdu_context *pasync_ctx,
1688 struct async_pdu_handle *pasync_handle, *tmp_handle;
1689 struct list_head *plist;
1691 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1692 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1693 list_del(&pasync_handle->link);
/* header and data buffers come from separate free pools */
1695 if (pasync_handle->is_header) {
1696 list_add_tail(&pasync_handle->link,
1697 &pasync_ctx->async_header.free_list);
1698 pasync_ctx->async_header.free_entries++;
1700 list_add_tail(&pasync_handle->link,
1701 &pasync_ctx->async_data.free_list);
1702 pasync_ctx->async_data.free_entries++;
/* reset the per-connection reassembly state */
1706 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1707 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1708 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
/*
 * hwi_get_ring_address - return the SGE slot at host_write_ptr in either
 * the async header ring or the async data ring.
 * The is_header branch condition line is elided in this listing.
 */
1711 static struct phys_addr *
1712 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1713 unsigned int is_header, unsigned int host_write_ptr)
1715 struct phys_addr *pasync_sge = NULL;
1718 pasync_sge = pasync_ctx->async_header.ring_base;
1720 pasync_sge = pasync_ctx->async_data.ring_base;
1722 return pasync_sge + host_write_ptr;
/*
 * hwi_post_async_buffers - replenish the default PDU (unsolicited) ring.
 *
 * Moves free handles (header or data ring, per is_header) onto the busy
 * lists, writes their DMA addresses into the ring SGEs at the host write
 * pointer, updates the free/writable/busy accounting, and rings the
 * per-ULP doorbell. Buffers are posted only in multiples of 8.
 * NOTE(review): the hi/lo swap at 1778-1779 matches the upstream driver —
 * pasync_sge->hi takes address_lo and ->lo takes address_hi; confirm
 * against the phys_addr definition before "fixing".
 */
1725 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1726 unsigned int is_header, uint8_t ulp_num)
1728 struct hwi_controller *phwi_ctrlr;
1729 struct hwi_async_pdu_context *pasync_ctx;
1730 struct async_pdu_handle *pasync_handle;
1731 struct list_head *pfree_link, *pbusy_list;
1732 struct phys_addr *pasync_sge;
1733 unsigned int ring_id, num_entries;
1734 unsigned int host_write_num, doorbell_offset;
1735 unsigned int writables;
1739 phwi_ctrlr = phba->phwi_ctrlr;
1740 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1741 num_entries = pasync_ctx->num_entries;
/* select header- or data-ring state, ring id and doorbell offset */
1744 writables = min(pasync_ctx->async_header.writables,
1745 pasync_ctx->async_header.free_entries);
1746 pfree_link = pasync_ctx->async_header.free_list.next;
1747 host_write_num = pasync_ctx->async_header.host_write_ptr;
1748 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1749 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1752 writables = min(pasync_ctx->async_data.writables,
1753 pasync_ctx->async_data.free_entries);
1754 pfree_link = pasync_ctx->async_data.free_list.next;
1755 host_write_num = pasync_ctx->async_data.host_write_ptr;
1756 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1757 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
/* hardware wants postings in chunks of 8; round down */
1761 writables = (writables / 8) * 8;
1763 for (i = 0; i < writables; i++) {
1765 hwi_get_async_busy_list(pasync_ctx, is_header,
1768 list_entry(pfree_link, struct async_pdu_handle,
1770 WARN_ON(!pasync_handle);
1771 pasync_handle->consumed = 0;
1773 pfree_link = pfree_link->next;
1775 pasync_sge = hwi_get_ring_address(pasync_ctx,
1776 is_header, host_write_num);
1778 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1779 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
/* handle is now owned by the hardware: free list -> busy list */
1781 list_move(&pasync_handle->link, pbusy_list);
1784 host_write_num = host_write_num % num_entries;
/* commit the new write pointer and adjust the three counters */
1788 pasync_ctx->async_header.host_write_ptr =
1790 pasync_ctx->async_header.free_entries -= writables;
1791 pasync_ctx->async_header.writables -= writables;
1792 pasync_ctx->async_header.busy_entries += writables;
1794 pasync_ctx->async_data.host_write_ptr = host_write_num;
1795 pasync_ctx->async_data.free_entries -= writables;
1796 pasync_ctx->async_data.writables -= writables;
1797 pasync_ctx->async_data.busy_entries += writables;
/* notify the adapter: ring id, rearm, and the number of buffers posted */
1800 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1801 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1802 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1803 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1804 << DB_DEF_PDU_CQPROC_SHIFT;
1806 iowrite32(doorbell, phba->db_va + doorbell_offset);
/*
 * hwi_flush_default_pdu_buffer - drop an unsolicited data buffer flagged
 * bad by the adapter (e.g. digest error) instead of delivering it.
 *
 * Resolves the handle for the CQE (must be a data buffer — BUG_ON
 * enforces is_header == 0), updates the writable accounting if the entry
 * was not already consumed, frees everything queued for the connection's
 * CRI, and reposts buffers to the ring.
 */
1810 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1811 struct beiscsi_conn *beiscsi_conn,
1812 struct i_t_dpdu_cqe *pdpdu_cqe)
1814 struct hwi_controller *phwi_ctrlr;
1815 struct hwi_async_pdu_context *pasync_ctx;
1816 struct async_pdu_handle *pasync_handle = NULL;
1817 unsigned int cq_index = -1;
1818 uint16_t cri_index = BE_GET_CRI_FROM_CID(
1819 beiscsi_conn->beiscsi_conn_cid);
1821 phwi_ctrlr = phba->phwi_ctrlr;
1822 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1823 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1826 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1827 pdpdu_cqe, &cq_index);
/* only data buffers are flushed on this path */
1828 BUG_ON(pasync_handle->is_header != 0);
1829 if (pasync_handle->consumed == 0)
1830 hwi_update_async_writables(phba, pasync_ctx,
1831 pasync_handle->is_header, cq_index);
/* discard the partially-gathered PDU and refill the ring */
1833 hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
1834 hwi_post_async_buffers(phba, pasync_handle->is_header,
1835 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
/*
 * hwi_fwd_async_msg - hand a fully-gathered unsolicited PDU to the upper
 * layer.
 *
 * Walks the per-CRI wait queue: the first entry supplies the header, the
 * first data entry supplies the destination buffer, and subsequent data
 * entries are memcpy-coalesced into it. The assembled header+payload is
 * passed to beiscsi_process_async_pdu() and the buffers are released.
 * NOTE(review): the index/num_buf/offset bookkeeping lines are elided in
 * this listing.
 */
1840 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1841 struct beiscsi_hba *phba,
1842 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1844 struct list_head *plist;
1845 struct async_pdu_handle *pasync_handle;
1847 unsigned int hdr_len = 0, buf_len = 0;
1848 unsigned int status, index = 0, offset = 0;
1849 void *pfirst_buffer = NULL;
1850 unsigned int num_buf = 0;
1852 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1854 list_for_each_entry(pasync_handle, plist, link) {
1856 phdr = pasync_handle->pbuffer;
1857 hdr_len = pasync_handle->buffer_len;
1859 buf_len = pasync_handle->buffer_len;
/* first data buffer becomes the coalescing destination */
1861 pfirst_buffer = pasync_handle->pbuffer;
1864 memcpy(pfirst_buffer + offset,
1865 pasync_handle->pbuffer, buf_len);
1871 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1872 phdr, hdr_len, pfirst_buffer,
/* buffers go back to the free pools regardless of processing status */
1875 hwi_free_async_msg(phba, pasync_ctx, cri);
/*
 * hwi_gather_async_pdu - accumulate the pieces of an unsolicited PDU.
 *
 * Header arrival: records hdr_len, decodes the expected data-segment
 * length from the PDU base header (split hi/lo fields), and queues the
 * handle on the per-CRI wait queue; if no payload is expected the PDU is
 * forwarded immediately. A second header while one is pending flushes the
 * stale state first.
 * Data arrival: queues the buffer, adds its length to bytes_received, and
 * forwards once bytes_needed is met.
 * NOTE(review): several continuation/brace lines are elided; the
 * bytes_received accumulation and the final return are not shown.
 */
1880 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1881 struct beiscsi_hba *phba,
1882 struct async_pdu_handle *pasync_handle)
1884 struct hwi_async_pdu_context *pasync_ctx;
1885 struct hwi_controller *phwi_ctrlr;
1886 unsigned int bytes_needed = 0, status = 0;
1887 unsigned short cri = pasync_handle->cri;
1888 struct pdu_base *ppdu;
1890 phwi_ctrlr = phba->phwi_ctrlr;
1891 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1892 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1893 BE_GET_CRI_FROM_CID(beiscsi_conn->
1894 beiscsi_conn_cid)));
/* the handle leaves its busy list; it joins the wait queue below */
1896 list_del(&pasync_handle->link);
1897 if (pasync_handle->is_header) {
1898 pasync_ctx->async_header.busy_entries--;
/* a new header while one is pending => drop the stale partial PDU */
1899 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1900 hwi_free_async_msg(phba, pasync_ctx, cri);
1904 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1905 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1906 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1907 (unsigned short)pasync_handle->buffer_len;
1908 list_add_tail(&pasync_handle->link,
1909 &pasync_ctx->async_entry[cri].wait_queue.list);
/* decode DataSegmentLength from the split hi/lo fields of the BHS */
1911 ppdu = pasync_handle->pbuffer;
1912 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1913 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1914 0xFFFF0000) | ((be16_to_cpu((ppdu->
1915 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1916 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1919 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
/* no data segment expected: the header alone is the whole PDU */
1922 if (bytes_needed == 0)
1923 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1927 pasync_ctx->async_data.busy_entries--;
1928 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1929 list_add_tail(&pasync_handle->link,
1930 &pasync_ctx->async_entry[cri].wait_queue.
1932 pasync_ctx->async_entry[cri].wait_queue.
1934 (unsigned short)pasync_handle->buffer_len;
/* all expected payload received: assemble and forward the PDU */
1936 if (pasync_ctx->async_entry[cri].wait_queue.
1938 pasync_ctx->async_entry[cri].wait_queue.
1940 status = hwi_fwd_async_msg(beiscsi_conn, phba,
/*
 * hwi_process_default_pdu_ring - handle an unsolicited header/data CQE.
 *
 * Resolves the async handle for the CQE, updates the writable accounting
 * if the entry was not yet consumed, feeds the buffer into the PDU
 * gathering state machine, and reposts buffers to the ring.
 * Mirrors hwi_flush_default_pdu_buffer() but delivers instead of dropping.
 */
1947 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1948 struct beiscsi_hba *phba,
1949 struct i_t_dpdu_cqe *pdpdu_cqe)
1951 struct hwi_controller *phwi_ctrlr;
1952 struct hwi_async_pdu_context *pasync_ctx;
1953 struct async_pdu_handle *pasync_handle = NULL;
1954 unsigned int cq_index = -1;
1955 uint16_t cri_index = BE_GET_CRI_FROM_CID(
1956 beiscsi_conn->beiscsi_conn_cid);
1958 phwi_ctrlr = phba->phwi_ctrlr;
1959 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1960 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1963 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1964 pdpdu_cqe, &cq_index);
1966 if (pasync_handle->consumed == 0)
1967 hwi_update_async_writables(phba, pasync_ctx,
1968 pasync_handle->is_header, cq_index);
1970 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1971 hwi_post_async_buffers(phba, pasync_handle->is_header,
1972 BEISCSI_GET_ULP_FROM_CRI(
1973 phwi_ctrlr, cri_index));
/*
 * beiscsi_process_mcc_isr - drain the management (MCC) completion queue.
 *
 * Walks valid CQEs at the tail: async entries are checked for link-state
 * events, completed entries are handed to be_mcc_compl_process_isr().
 * The CQ doorbell is rung (without rearm) every 32 entries to keep the
 * queue from backing up, and once more with rearm after the loop.
 * NOTE(review): the num_processed increment/reset lines are elided.
 */
1976 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1978 struct be_queue_info *mcc_cq;
1979 struct be_mcc_compl *mcc_compl;
1980 unsigned int num_processed = 0;
1982 mcc_cq = &phba->ctrl.mcc_obj.cq;
1983 mcc_compl = queue_tail_node(mcc_cq);
1984 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1985 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
/* batch-ack every 32 completions without rearming the CQ */
1987 if (num_processed >= 32) {
1988 hwi_ring_cq_db(phba, mcc_cq->id,
1989 num_processed, 0, 0);
1992 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1993 /* Interpret flags as an async trailer */
1994 if (is_link_state_evt(mcc_compl->flags))
1995 /* Interpret compl as a async link evt */
1996 beiscsi_async_link_state_process(phba,
1997 (struct be_async_event_link_state *) mcc_compl);
1999 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
2000 "BM_%d : Unsupported Async Event, flags"
2003 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
2004 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
2005 atomic_dec(&phba->ctrl.mcc_obj.q.used);
/* invalidate the CQE and move to the next tail entry */
2008 mcc_compl->flags = 0;
2009 queue_tail_inc(mcc_cq);
2010 mcc_compl = queue_tail_node(mcc_cq);
2011 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
/* final ack with rearm so further MCC completions raise interrupts */
2015 if (num_processed > 0)
2016 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
2021 * beiscsi_process_cq()- Process the Completion Queue
2022 * @pbe_eq: Event Q on which the Completion has come
2025 * Number of Completion Entries processed.
/*
 * Main CQ drain loop: converts each valid CQE to CPU byte order, derives
 * the CID (layout depends on chip generation and CQE code), resolves the
 * owning connection through phba->ep_array, and dispatches on the CQE
 * code. Unsolicited PDU work runs under phba->async_pdu_lock; fatal
 * connection events trigger iscsi_conn_failure(). The CQ doorbell is
 * rung without rearm every 32 entries and with rearm once at the end.
 * Returns the total number of CQEs processed (accumulated in tot_nump).
 * NOTE(review): per-case break statements, num_processed bookkeeping and
 * queue_tail_inc lines are elided in this listing.
 */
2027 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2029 struct be_queue_info *cq;
2030 struct sol_cqe *sol;
2031 struct dmsg_cqe *dmsg;
2032 unsigned int num_processed = 0;
2033 unsigned int tot_nump = 0;
2034 unsigned short code = 0, cid = 0;
2035 uint16_t cri_index = 0;
2036 struct beiscsi_conn *beiscsi_conn;
2037 struct beiscsi_endpoint *beiscsi_ep;
2038 struct iscsi_endpoint *ep;
2039 struct beiscsi_hba *phba;
2042 sol = queue_tail_node(cq);
2043 phba = pbe_eq->phba;
2045 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
2047 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
2049 code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
2050 32] & CQE_CODE_MASK);
/* CID location differs by chip and, on v2 chips, by CQE code */
2053 if (is_chip_be2_be3r(phba)) {
2054 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2056 if ((code == DRIVERMSG_NOTIFY) ||
2057 (code == UNSOL_HDR_NOTIFY) ||
2058 (code == UNSOL_DATA_NOTIFY))
2059 cid = AMAP_GET_BITS(
2060 struct amap_i_t_dpdu_cqe_v2,
2063 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
/* CID -> endpoint -> connection lookup */
2067 cri_index = BE_GET_CRI_FROM_CID(cid);
2068 ep = phba->ep_array[cri_index];
2069 beiscsi_ep = ep->dd_data;
2070 beiscsi_conn = beiscsi_ep->conn;
/* batch-ack every 32 completions without rearming */
2072 if (num_processed >= 32) {
2073 hwi_ring_cq_db(phba, cq->id,
2074 num_processed, 0, 0);
2075 tot_nump += num_processed;
2080 case SOL_CMD_COMPLETE:
2081 hwi_complete_cmd(beiscsi_conn, phba, sol);
2083 case DRIVERMSG_NOTIFY:
2084 beiscsi_log(phba, KERN_INFO,
2085 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2086 "BM_%d : Received %s[%d] on CID : %d\n",
2087 cqe_desc[code], code, cid);
2089 dmsg = (struct dmsg_cqe *)sol;
2090 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
/* unsolicited header/data share the default-PDU ring handler */
2092 case UNSOL_HDR_NOTIFY:
2093 beiscsi_log(phba, KERN_INFO,
2094 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2095 "BM_%d : Received %s[%d] on CID : %d\n",
2096 cqe_desc[code], code, cid);
2098 spin_lock_bh(&phba->async_pdu_lock);
2099 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2100 (struct i_t_dpdu_cqe *)sol);
2101 spin_unlock_bh(&phba->async_pdu_lock);
2103 case UNSOL_DATA_NOTIFY:
2104 beiscsi_log(phba, KERN_INFO,
2105 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2106 "BM_%d : Received %s[%d] on CID : %d\n",
2107 cqe_desc[code], code, cid);
2109 spin_lock_bh(&phba->async_pdu_lock);
2110 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2111 (struct i_t_dpdu_cqe *)sol);
2112 spin_unlock_bh(&phba->async_pdu_lock);
/* invalidate notifications are acknowledged but otherwise ignored */
2114 case CXN_INVALIDATE_INDEX_NOTIFY:
2115 case CMD_INVALIDATED_NOTIFY:
2116 case CXN_INVALIDATE_NOTIFY:
2117 beiscsi_log(phba, KERN_ERR,
2118 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2119 "BM_%d : Ignoring %s[%d] on CID : %d\n",
2120 cqe_desc[code], code, cid);
/* command-level kill events: log only, connection stays up */
2122 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2123 case CMD_KILLED_INVALID_STATSN_RCVD:
2124 case CMD_KILLED_INVALID_R2T_RCVD:
2125 case CMD_CXN_KILLED_LUN_INVALID:
2126 case CMD_CXN_KILLED_ICD_INVALID:
2127 case CMD_CXN_KILLED_ITT_INVALID:
2128 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2129 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
2130 beiscsi_log(phba, KERN_ERR,
2131 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2132 "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2133 cqe_desc[code], code, cid);
/* bad unsolicited data: flush the buffer back to the ring */
2135 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
2136 beiscsi_log(phba, KERN_ERR,
2137 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2138 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2139 cqe_desc[code], code, cid);
2140 spin_lock_bh(&phba->async_pdu_lock);
2141 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2142 (struct i_t_dpdu_cqe *) sol);
2143 spin_unlock_bh(&phba->async_pdu_lock);
/* connection-fatal events: tear the iSCSI connection down */
2145 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2146 case CXN_KILLED_BURST_LEN_MISMATCH:
2147 case CXN_KILLED_AHS_RCVD:
2148 case CXN_KILLED_HDR_DIGEST_ERR:
2149 case CXN_KILLED_UNKNOWN_HDR:
2150 case CXN_KILLED_STALE_ITT_TTT_RCVD:
2151 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2152 case CXN_KILLED_TIMED_OUT:
2153 case CXN_KILLED_FIN_RCVD:
2154 case CXN_KILLED_RST_SENT:
2155 case CXN_KILLED_RST_RCVD:
2156 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2157 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2158 case CXN_KILLED_OVER_RUN_RESIDUAL:
2159 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2160 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
2161 beiscsi_log(phba, KERN_ERR,
2162 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2163 "BM_%d : Event %s[%d] received on CID : %d\n",
2164 cqe_desc[code], code, cid);
2166 iscsi_conn_failure(beiscsi_conn->conn,
2167 ISCSI_ERR_CONN_FAILED);
2170 beiscsi_log(phba, KERN_ERR,
2171 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2172 "BM_%d : Invalid CQE Event Received Code : %d"
/* clear the valid bit so this CQE is not reprocessed, then advance */
2178 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2180 sol = queue_tail_node(cq);
/* final ack with rearm */
2184 if (num_processed > 0) {
2185 tot_nump += num_processed;
2186 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
/*
 * beiscsi_process_all_cqs - deferred (workqueue) CQ processing.
 *
 * Runs from the be_eq_obj work item. Each todo flag is tested and cleared
 * under isr_lock so it cannot race with the interrupt handler setting it,
 * then the corresponding queue (MCC and/or I/O CQ) is drained. The EQ is
 * rearmed at the end so further events raise interrupts again.
 */
2191 void beiscsi_process_all_cqs(struct work_struct *work)
2193 unsigned long flags;
2194 struct hwi_controller *phwi_ctrlr;
2195 struct hwi_context_memory *phwi_context;
2196 struct beiscsi_hba *phba;
2197 struct be_eq_obj *pbe_eq =
2198 container_of(work, struct be_eq_obj, work_cqs);
2200 phba = pbe_eq->phba;
2201 phwi_ctrlr = phba->phwi_ctrlr;
2202 phwi_context = phwi_ctrlr->phwi_ctxt;
/* test-and-clear under isr_lock; the ISR sets these flags */
2204 if (pbe_eq->todo_mcc_cq) {
2205 spin_lock_irqsave(&phba->isr_lock, flags);
2206 pbe_eq->todo_mcc_cq = false;
2207 spin_unlock_irqrestore(&phba->isr_lock, flags);
2208 beiscsi_process_mcc_isr(phba);
2211 if (pbe_eq->todo_cq) {
2212 spin_lock_irqsave(&phba->isr_lock, flags);
2213 pbe_eq->todo_cq = false;
2214 spin_unlock_irqrestore(&phba->isr_lock, flags);
2215 beiscsi_process_cq(pbe_eq);
2218 /* rearm EQ for further interrupts */
2219 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
/*
 * be_iopoll - blk_iopoll (interrupt-mitigation polling) callback.
 *
 * Drains the EQ's completion queue; when fewer entries than the budget
 * were processed the poll is completed and the EQ rearmed. The
 * budget-comparison branch and the return of the processed count are on
 * elided lines in this listing.
 */
2222 static int be_iopoll(struct blk_iopoll *iop, int budget)
2225 struct beiscsi_hba *phba;
2226 struct be_eq_obj *pbe_eq;
2228 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2229 ret = beiscsi_process_cq(pbe_eq);
2231 phba = pbe_eq->phba;
/* under budget: stop polling and rearm the EQ for interrupts */
2232 blk_iopoll_complete(iop);
2233 beiscsi_log(phba, KERN_INFO,
2234 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2235 "BM_%d : rearm pbe_eq->q.id =%d\n",
2237 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
/*
 * hwi_write_sgl_v2 - program the WRB and SGL fragment page for an I/O on
 * v2 (post-BE3) chips.
 *
 * Writes the BHS address into the WRB, inlines the first two scatterlist
 * elements directly into the WRB's sge0/sge1 slots, then rebuilds the
 * full fragment page: SGE 0 describes the BHS, the remaining SGEs mirror
 * the entire scatterlist, with last_sge set on the final entry. The
 * sge0_last/sge1_last WRB bits encode whether the inline SGEs cover the
 * whole transfer (num_sg == 1 / == 2 / more).
 * NOTE(review): loop-variable updates, offset accumulation and several
 * AMAP field-name continuation lines are elided in this listing.
 */
2243 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2244 unsigned int num_sg, struct beiscsi_io_task *io_task)
2246 struct iscsi_sge *psgl;
2247 unsigned int sg_len, index;
2248 unsigned int sge_len = 0;
2249 unsigned long long addr;
2250 struct scatterlist *l_sg;
2251 unsigned int offset;
/* BHS physical address goes straight into the WRB */
2253 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
2254 io_task->bhs_pa.u.a32.address_lo);
2255 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
2256 io_task->bhs_pa.u.a32.address_hi);
/* inline up to the first two SG elements into the WRB's sge0/sge1 */
2259 for (index = 0; (index < num_sg) && (index < 2); index++,
2262 sg_len = sg_dma_len(sg);
2263 addr = (u64) sg_dma_address(sg);
2264 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2266 lower_32_bits(addr));
2267 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2269 upper_32_bits(addr));
2270 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2275 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
2277 sg_len = sg_dma_len(sg);
2278 addr = (u64) sg_dma_address(sg);
2279 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2281 lower_32_bits(addr));
2282 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2284 upper_32_bits(addr));
2285 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
/* rebuild the fragment page; SGE 0 = BHS (len excludes 2 pad bytes) */
2290 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2291 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2293 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2295 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2296 io_task->bhs_pa.u.a32.address_hi);
2297 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2298 io_task->bhs_pa.u.a32.address_lo);
/* flag which inline WRB SGEs terminate the transfer */
2301 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2303 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2305 } else if (num_sg == 2) {
2306 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2308 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2311 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2313 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
/* mirror the whole scatterlist into the fragment page */
2321 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2322 sg_len = sg_dma_len(sg);
2323 addr = (u64) sg_dma_address(sg);
2324 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2325 lower_32_bits(addr));
2326 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2327 upper_32_bits(addr));
2328 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2329 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2330 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
/* mark the final fragment-page SGE */
2334 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
/*
 * hwi_write_sgl - program the WRB and SGL fragment page for an I/O on
 * BE2/BE3-R chips.
 *
 * Same structure as hwi_write_sgl_v2() but using the v1
 * amap_iscsi_wrb/amap_iscsi_sge layouts and explicit named sge0/sge1
 * address and length fields: BHS address into the WRB, first two SG
 * elements inlined, then the full scatterlist mirrored into the fragment
 * page with last_sge on the final entry.
 * NOTE(review): loop-variable updates, offset accumulation and several
 * continuation lines are elided in this listing.
 */
2338 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2339 unsigned int num_sg, struct beiscsi_io_task *io_task)
2341 struct iscsi_sge *psgl;
2342 unsigned int sg_len, index;
2343 unsigned int sge_len = 0;
2344 unsigned long long addr;
2345 struct scatterlist *l_sg;
2346 unsigned int offset;
/* BHS physical address goes straight into the WRB */
2348 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2349 io_task->bhs_pa.u.a32.address_lo);
2350 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2351 io_task->bhs_pa.u.a32.address_hi);
/* inline up to the first two SG elements into the WRB's sge0/sge1 */
2354 for (index = 0; (index < num_sg) && (index < 2); index++,
2357 sg_len = sg_dma_len(sg);
2358 addr = (u64) sg_dma_address(sg);
2359 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2360 ((u32)(addr & 0xFFFFFFFF)));
2361 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2362 ((u32)(addr >> 32)));
2363 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2367 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2369 sg_len = sg_dma_len(sg);
2370 addr = (u64) sg_dma_address(sg);
2371 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2372 ((u32)(addr & 0xFFFFFFFF)));
2373 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2374 ((u32)(addr >> 32)));
2375 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
/* rebuild the fragment page; SGE 0 = BHS (len excludes 2 pad bytes) */
2379 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2380 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2382 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2384 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2385 io_task->bhs_pa.u.a32.address_hi);
2386 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2387 io_task->bhs_pa.u.a32.address_lo);
/* flag which inline WRB SGEs terminate the transfer */
2390 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2392 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2394 } else if (num_sg == 2) {
2395 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2397 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2400 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2402 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
/* mirror the whole scatterlist into the fragment page */
2409 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2410 sg_len = sg_dma_len(sg);
2411 addr = (u64) sg_dma_address(sg);
2412 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2413 (addr & 0xFFFFFFFF));
2414 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2416 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2417 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2418 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
/* mark the final fragment-page SGE */
2422 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2426 * hwi_write_buffer()- Populate the WRB with task info
2427 * @pwrb: ptr to the WRB entry
2428 * @task: iscsi task which is to be executed
2430 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2432 struct iscsi_sge *psgl;
2433 struct beiscsi_io_task *io_task = task->dd_data;
2434 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2435 struct beiscsi_hba *phba = beiscsi_conn->phba;
2436 uint8_t dsp_value = 0;
2438 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2439 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2440 io_task->bhs_pa.u.a32.address_lo);
2441 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2442 io_task->bhs_pa.u.a32.address_hi);
2446 /* Check for the data_count */
2447 dsp_value = (task->data_count) ? 1 : 0;
2449 if (is_chip_be2_be3r(phba))
2450 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2453 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2456 /* Map addr only if there is data_count */
2458 io_task->mtask_addr = pci_map_single(phba->pcidev,
2462 io_task->mtask_data_count = task->data_count;
2464 io_task->mtask_addr = 0;
2466 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2467 lower_32_bits(io_task->mtask_addr));
2468 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2469 upper_32_bits(io_task->mtask_addr));
2470 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2473 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2475 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2476 io_task->mtask_addr = 0;
2479 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2481 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2483 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2484 io_task->bhs_pa.u.a32.address_hi);
2485 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2486 io_task->bhs_pa.u.a32.address_lo);
2489 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2490 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2491 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2492 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2493 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2494 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2498 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2499 lower_32_bits(io_task->mtask_addr));
2500 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2501 upper_32_bits(io_task->mtask_addr));
2503 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2505 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2509 * beiscsi_find_mem_req()- Find mem needed
2510 * @phba: ptr to HBA struct
/* Computes, per memory-descriptor slot, the number of bytes the driver will
 * later allocate in beiscsi_alloc_mem(); results are stored in phba->mem_req[].
 * (Listing has elided lines; comments cover visible code only.)
 */
2512 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2514 uint8_t mem_descr_index, ulp_num;
2515 unsigned int num_cq_pages, num_async_pdu_buf_pages;
2516 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2517 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2519 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2520 sizeof(struct sol_cqe));
2522 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
/* Fixed-size requirements: global header templates and HWI context. */
2524 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2525 BE_ISCSI_PDU_HEADER_SIZE;
2526 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2527 sizeof(struct hwi_context_memory);
/* WRB rings and their handle arrays scale with wrbs_per_cxn * cxns_per_ctrl. */
2530 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2531 * (phba->params.wrbs_per_cxn)
2532 * phba->params.cxns_per_ctrl;
2533 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2534 (phba->params.wrbs_per_cxn);
2535 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2536 phba->params.cxns_per_ctrl);
2538 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2539 phba->params.icds_per_ctrl;
2540 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2541 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2542 phba->mem_req[HWI_MEM_TEMPLATE_HDR] = phba->params.cxns_per_ctrl *
2543 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
/* Per-ULP async PDU resources, only for ULPs the firmware reports. */
2544 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2545 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2547 num_async_pdu_buf_sgl_pages =
2548 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2550 sizeof(struct phys_addr));
2552 num_async_pdu_buf_pages =
2553 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2555 phba->params.defpdu_hdr_sz);
2557 num_async_pdu_data_pages =
2558 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2560 phba->params.defpdu_data_sz);
2562 num_async_pdu_data_sgl_pages =
2563 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2565 sizeof(struct phys_addr));
/* Each per-ULP slot is HWI_MEM_*_ULP0 offset by ulp_num * MEM_DESCR_OFFSET. */
2567 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2568 (ulp_num * MEM_DESCR_OFFSET));
2569 phba->mem_req[mem_descr_index] =
2570 num_async_pdu_buf_pages *
2573 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2574 (ulp_num * MEM_DESCR_OFFSET));
2575 phba->mem_req[mem_descr_index] =
2576 num_async_pdu_data_pages *
2579 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2580 (ulp_num * MEM_DESCR_OFFSET));
2581 phba->mem_req[mem_descr_index] =
2582 num_async_pdu_buf_sgl_pages *
2585 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2586 (ulp_num * MEM_DESCR_OFFSET));
2587 phba->mem_req[mem_descr_index] =
2588 num_async_pdu_data_sgl_pages *
2591 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2592 (ulp_num * MEM_DESCR_OFFSET));
2593 phba->mem_req[mem_descr_index] =
2594 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2595 sizeof(struct async_pdu_handle);
2597 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2598 (ulp_num * MEM_DESCR_OFFSET));
2599 phba->mem_req[mem_descr_index] =
2600 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2601 sizeof(struct async_pdu_handle);
/* Context struct plus one hwi_async_entry per CID of this ULP. */
2603 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2604 (ulp_num * MEM_DESCR_OFFSET));
2605 phba->mem_req[mem_descr_index] =
2606 sizeof(struct hwi_async_pdu_context) +
2607 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2608 sizeof(struct hwi_async_entry));
/* Allocate every region sized by beiscsi_find_mem_req(). Large regions are
 * satisfied as multiple DMA-coherent fragments (up to BEISCSI_MAX_FRAGS_INIT),
 * halving the attempted size when pci_alloc_consistent() fails. On any
 * failure, everything allocated so far is unwound.
 * (Listing has elided lines — error labels/returns are not visible here.)
 */
2613 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2616 struct hwi_controller *phwi_ctrlr;
2617 struct be_mem_descriptor *mem_descr;
2618 struct mem_array *mem_arr, *mem_arr_orig;
2619 unsigned int i, j, alloc_size, curr_alloc_size;
2621 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2622 if (!phba->phwi_ctrlr)
2625 /* Allocate memory for wrb_context */
2626 phwi_ctrlr = phba->phwi_ctrlr;
2627 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
2628 phba->params.cxns_per_ctrl,
2630 if (!phwi_ctrlr->wrb_context)
2633 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2635 if (!phba->init_mem) {
/* Manual unwind of earlier allocations on failure. */
2636 kfree(phwi_ctrlr->wrb_context);
2637 kfree(phba->phwi_ctrlr);
/* Scratch fragment table, copied into each descriptor once sized. */
2641 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2643 if (!mem_arr_orig) {
2644 kfree(phba->init_mem);
2645 kfree(phwi_ctrlr->wrb_context);
2646 kfree(phba->phwi_ctrlr);
2650 mem_descr = phba->init_mem;
2651 for (i = 0; i < SE_MEM_MAX; i++) {
2652 if (!phba->mem_req[i]) {
/* Zero-sized requirement: nothing to allocate for this slot. */
2653 mem_descr->mem_array = NULL;
2659 mem_arr = mem_arr_orig;
2660 alloc_size = phba->mem_req[i];
2661 memset(mem_arr, 0, sizeof(struct mem_array) *
2662 BEISCSI_MAX_FRAGS_INIT);
/* First attempt capped at the be_max_phys_size module parameter (KiB). */
2663 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2665 mem_arr->virtual_address = pci_alloc_consistent(
2669 if (!mem_arr->virtual_address) {
2670 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
/* Retry with a smaller chunk: round down to a power of two, else halve. */
2672 if (curr_alloc_size -
2673 rounddown_pow_of_two(curr_alloc_size))
2674 curr_alloc_size = rounddown_pow_of_two
2677 curr_alloc_size = curr_alloc_size / 2;
2679 mem_arr->bus_address.u.
2680 a64.address = (__u64) bus_add;
2681 mem_arr->size = curr_alloc_size;
2682 alloc_size -= curr_alloc_size;
2683 curr_alloc_size = min(be_max_phys_size *
2688 } while (alloc_size);
2689 mem_descr->num_elements = j;
2690 mem_descr->size_in_bytes = phba->mem_req[i];
2691 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2693 if (!mem_descr->mem_array)
2696 memcpy(mem_descr->mem_array, mem_arr_orig,
2697 sizeof(struct mem_array) * j);
2700 kfree(mem_arr_orig);
/* Failure path: walk back over all descriptors/fragments allocated so far. */
2703 mem_descr->num_elements = j;
2704 while ((i) || (j)) {
2705 for (j = mem_descr->num_elements; j > 0; j--) {
2706 pci_free_consistent(phba->pcidev,
2707 mem_descr->mem_array[j - 1].size,
2708 mem_descr->mem_array[j - 1].
2710 (unsigned long)mem_descr->
2712 bus_address.u.a64.address);
2716 kfree(mem_descr->mem_array);
2720 kfree(mem_arr_orig);
2721 kfree(phba->init_mem);
2722 kfree(phba->phwi_ctrlr->wrb_context);
2723 kfree(phba->phwi_ctrlr);
/* Size all memory requirements, then allocate them; returns the status of
 * beiscsi_alloc_mem().
 */
2727 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2729 beiscsi_find_mem_req(phba);
2730 return beiscsi_alloc_mem(phba);
/* Initialize the two global PDU header templates (Data-Out then NOP-Out)
 * that live back-to-back in the ISCSI_MEM_GLOBAL_HEADER region.
 */
2733 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2735 struct pdu_data_out *pdata_out;
2736 struct pdu_nop_out *pnop_out;
2737 struct be_mem_descriptor *mem_descr;
2739 mem_descr = phba->init_mem;
2740 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2742 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2743 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2745 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
/* NOP-Out template sits immediately after the Data-Out template. */
2749 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2750 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2752 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2753 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2754 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2755 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
/* Carve the pre-allocated HWI_MEM_WRBH / HWI_MEM_WRB regions into per-
 * connection WRB-handle arrays and WRB rings, and build the base/basestd
 * lookup tables in each hwi_wrb_context.
 * (Listing has elided lines; comments cover visible code only.)
 */
2758 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2760 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2761 struct hwi_context_memory *phwi_ctxt;
2762 struct wrb_handle *pwrb_handle = NULL;
2763 struct hwi_controller *phwi_ctrlr;
2764 struct hwi_wrb_context *pwrb_context;
2765 struct iscsi_wrb *pwrb = NULL;
2766 unsigned int num_cxn_wrbh = 0;
2767 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2769 mem_descr_wrbh = phba->init_mem;
2770 mem_descr_wrbh += HWI_MEM_WRBH;
2772 mem_descr_wrb = phba->init_mem;
2773 mem_descr_wrb += HWI_MEM_WRB;
2774 phwi_ctrlr = phba->phwi_ctrlr;
2776 /* Allocate memory for WRBQ */
2777 phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2778 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
2779 phba->params.cxns_per_ctrl,
2781 if (!phwi_ctxt->be_wrbq) {
2782 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2783 "BM_%d : WRBQ Mem Alloc Failed\n");
/* Pass 1: allocate the handle pointer tables and hand out wrb_handles. */
2787 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2788 pwrb_context = &phwi_ctrlr->wrb_context[index];
2789 pwrb_context->pwrb_handle_base =
2790 kzalloc(sizeof(struct wrb_handle *) *
2791 phba->params.wrbs_per_cxn, GFP_KERNEL);
2792 if (!pwrb_context->pwrb_handle_base) {
2793 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2794 "BM_%d : Mem Alloc Failed. Failing to load\n");
2795 goto init_wrb_hndl_failed;
2797 pwrb_context->pwrb_handle_basestd =
2798 kzalloc(sizeof(struct wrb_handle *) *
2799 phba->params.wrbs_per_cxn, GFP_KERNEL);
2800 if (!pwrb_context->pwrb_handle_basestd) {
2801 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2802 "BM_%d : Mem Alloc Failed. Failing to load\n");
2803 goto init_wrb_hndl_failed;
/* Refill from the next memory fragment when the current one is exhausted. */
2805 if (!num_cxn_wrbh) {
2807 mem_descr_wrbh->mem_array[idx].virtual_address;
2808 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2809 ((sizeof(struct wrb_handle)) *
2810 phba->params.wrbs_per_cxn));
2813 pwrb_context->alloc_index = 0;
2814 pwrb_context->wrb_handles_available = 0;
2815 pwrb_context->free_index = 0;
2818 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2819 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2820 pwrb_context->pwrb_handle_basestd[j] =
2822 pwrb_context->wrb_handles_available++;
2823 pwrb_handle->wrb_index = j;
/* Pass 2: attach each handle to its iscsi_wrb slot in the WRB region. */
2830 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2831 pwrb_context = &phwi_ctrlr->wrb_context[index];
2833 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2834 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2835 ((sizeof(struct iscsi_wrb) *
2836 phba->params.wrbs_per_cxn));
2841 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2842 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2843 pwrb_handle->pwrb = pwrb;
/* Unwind the pointer tables of connections initialized so far. */
2850 init_wrb_hndl_failed:
2851 for (j = index; j > 0; j--) {
2852 pwrb_context = &phwi_ctrlr->wrb_context[j];
2853 kfree(pwrb_context->pwrb_handle_base);
2854 kfree(pwrb_context->pwrb_handle_basestd);
/* For each firmware-supported ULP, lay out the async (default) PDU context:
 * header/data buffers, rings, and handle arrays, all from the per-ULP
 * regions reserved in beiscsi_find_mem_req(), then thread every handle onto
 * the free lists.
 * (Listing has elided lines; comments cover visible code only.)
 */
2859 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2862 struct hwi_controller *phwi_ctrlr;
2863 struct hba_parameters *p = &phba->params;
2864 struct hwi_async_pdu_context *pasync_ctx;
2865 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2866 unsigned int index, idx, num_per_mem, num_async_data;
2867 struct be_mem_descriptor *mem_descr;
2869 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2870 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
/* The context struct itself lives at the head of the per-ULP region. */
2872 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2873 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2874 (ulp_num * MEM_DESCR_OFFSET));
2876 phwi_ctrlr = phba->phwi_ctrlr;
2877 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2878 (struct hwi_async_pdu_context *)
2879 mem_descr->mem_array[0].virtual_address;
2881 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2882 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
/* async_entry array follows the context struct in the same region. */
2884 pasync_ctx->async_entry =
2885 (struct hwi_async_entry *)
2886 ((long unsigned int)pasync_ctx +
2887 sizeof(struct hwi_async_pdu_context));
2889 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
2891 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
/* Header buffer region: record va/pa base after logging availability. */
2893 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2894 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2895 (ulp_num * MEM_DESCR_OFFSET);
2896 if (mem_descr->mem_array[0].virtual_address) {
2897 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2898 "BM_%d : hwi_init_async_pdu_ctx"
2899 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
2901 mem_descr->mem_array[0].
2904 beiscsi_log(phba, KERN_WARNING,
2906 "BM_%d : No Virtual address for ULP : %d\n",
2909 pasync_ctx->async_header.va_base =
2910 mem_descr->mem_array[0].virtual_address;
2912 pasync_ctx->async_header.pa_base.u.a64.address =
2913 mem_descr->mem_array[0].
2914 bus_address.u.a64.address;
/* Header ring region. */
2916 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2917 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2918 (ulp_num * MEM_DESCR_OFFSET);
2919 if (mem_descr->mem_array[0].virtual_address) {
2920 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2921 "BM_%d : hwi_init_async_pdu_ctx"
2922 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
2924 mem_descr->mem_array[0].
2927 beiscsi_log(phba, KERN_WARNING,
2929 "BM_%d : No Virtual address for ULP : %d\n",
2932 pasync_ctx->async_header.ring_base =
2933 mem_descr->mem_array[0].virtual_address;
/* Header handle region. */
2935 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2936 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2937 (ulp_num * MEM_DESCR_OFFSET);
2938 if (mem_descr->mem_array[0].virtual_address) {
2939 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2940 "BM_%d : hwi_init_async_pdu_ctx"
2941 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
2943 mem_descr->mem_array[0].
2946 beiscsi_log(phba, KERN_WARNING,
2948 "BM_%d : No Virtual address for ULP : %d\n",
2951 pasync_ctx->async_header.handle_base =
2952 mem_descr->mem_array[0].virtual_address;
2953 pasync_ctx->async_header.writables = 0;
2954 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
/* Data ring region. */
2956 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2957 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
2958 (ulp_num * MEM_DESCR_OFFSET);
2959 if (mem_descr->mem_array[0].virtual_address) {
2960 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2961 "BM_%d : hwi_init_async_pdu_ctx"
2962 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
2964 mem_descr->mem_array[0].
2967 beiscsi_log(phba, KERN_WARNING,
2969 "BM_%d : No Virtual address for ULP : %d\n",
2972 pasync_ctx->async_data.ring_base =
2973 mem_descr->mem_array[0].virtual_address;
/* Data handle region. */
2975 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2976 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2977 (ulp_num * MEM_DESCR_OFFSET);
2978 if (!mem_descr->mem_array[0].virtual_address)
2979 beiscsi_log(phba, KERN_WARNING,
2981 "BM_%d : No Virtual address for ULP : %d\n",
2984 pasync_ctx->async_data.handle_base =
2985 mem_descr->mem_array[0].virtual_address;
2986 pasync_ctx->async_data.writables = 0;
2987 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2990 (struct async_pdu_handle *)
2991 pasync_ctx->async_header.handle_base;
2993 (struct async_pdu_handle *)
2994 pasync_ctx->async_data.handle_base;
/* Data buffer region; may span several fragments, tracked by idx below. */
2996 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2997 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2998 (ulp_num * MEM_DESCR_OFFSET);
2999 if (mem_descr->mem_array[0].virtual_address) {
3000 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3001 "BM_%d : hwi_init_async_pdu_ctx"
3002 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
3004 mem_descr->mem_array[0].
3007 beiscsi_log(phba, KERN_WARNING,
3009 "BM_%d : No Virtual address for ULP : %d\n",
3013 pasync_ctx->async_data.va_base =
3014 mem_descr->mem_array[idx].virtual_address;
3015 pasync_ctx->async_data.pa_base.u.a64.address =
3016 mem_descr->mem_array[idx].
3017 bus_address.u.a64.address;
3019 num_async_data = ((mem_descr->mem_array[idx].size) /
3020 phba->params.defpdu_data_sz);
/* Populate one header handle and one data handle per CID. */
3023 for (index = 0; index < BEISCSI_GET_CID_COUNT
3024 (phba, ulp_num); index++) {
3025 pasync_header_h->cri = -1;
3026 pasync_header_h->index = (char)index;
3027 INIT_LIST_HEAD(&pasync_header_h->link);
3028 pasync_header_h->pbuffer =
3029 (void *)((unsigned long)
3031 async_header.va_base) +
3032 (p->defpdu_hdr_sz * index));
3034 pasync_header_h->pa.u.a64.address =
3035 pasync_ctx->async_header.pa_base.u.a64.
3036 address + (p->defpdu_hdr_sz * index);
3038 list_add_tail(&pasync_header_h->link,
3039 &pasync_ctx->async_header.
3042 pasync_ctx->async_header.free_entries++;
3043 pasync_ctx->async_header.writables++;
3045 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3047 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3049 pasync_data_h->cri = -1;
3050 pasync_data_h->index = (char)index;
3051 INIT_LIST_HEAD(&pasync_data_h->link);
/* Current data fragment exhausted — switch to the next one. */
3053 if (!num_async_data) {
3056 pasync_ctx->async_data.va_base =
3057 mem_descr->mem_array[idx].
3059 pasync_ctx->async_data.pa_base.u.
3061 mem_descr->mem_array[idx].
3062 bus_address.u.a64.address;
3064 ((mem_descr->mem_array[idx].
3066 phba->params.defpdu_data_sz);
3068 pasync_data_h->pbuffer =
3069 (void *)((unsigned long)
3070 (pasync_ctx->async_data.va_base) +
3071 (p->defpdu_data_sz * num_per_mem));
3073 pasync_data_h->pa.u.a64.address =
3074 pasync_ctx->async_data.pa_base.u.a64.
3075 address + (p->defpdu_data_sz *
3080 list_add_tail(&pasync_data_h->link,
3081 &pasync_ctx->async_data.
3084 pasync_ctx->async_data.free_entries++;
3085 pasync_ctx->async_data.writables++;
3087 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
/* Reset producer/consumer indices for both rings. */
3091 pasync_ctx->async_header.host_write_ptr = 0;
3092 pasync_ctx->async_header.ep_read_ptr = -1;
3093 pasync_ctx->async_data.host_write_ptr = 0;
3094 pasync_ctx->async_data.ep_read_ptr = -1;
3102 be_sgl_create_contiguous(void *virtual_address,
3103 u64 physical_address, u32 length,
3104 struct be_dma_mem *sgl)
3106 WARN_ON(!virtual_address);
3107 WARN_ON(!physical_address);
3108 WARN_ON(!length > 0);
3111 sgl->va = virtual_address;
3112 sgl->dma = (unsigned long)physical_address;
/* Reset a contiguous-buffer descriptor to the all-zero (empty) state. */
3118 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
3120 memset(sgl, 0, sizeof(*sgl));
/* Rebuild @sgl from a mem_array fragment: clear it, then describe the
 * fragment's va/bus-address pair as one contiguous region.
 */
3124 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
3125 struct mem_array *pmem, struct be_dma_mem *sgl)
3128 be_sgl_destroy_contiguous(sgl);
3130 be_sgl_create_contiguous(pmem->virtual_address,
3131 pmem->bus_address.u.a64.address,
/* Like hwi_build_be_sgl_arr(), but the length/offset arguments (elided in
 * this listing) are taken from the caller rather than the whole fragment.
 */
3136 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
3137 struct mem_array *pmem, struct be_dma_mem *sgl)
3140 be_sgl_destroy_contiguous(sgl);
3142 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
3143 pmem->bus_address.u.a64.address,
/* Initialize a queue_info for @len entries of @entry_size bytes backed by
 * the caller-provided @vaddress, zeroing the backing memory.
 */
3147 static int be_fill_queue(struct be_queue_info *q,
3148 u16 len, u16 entry_size, void *vaddress)
3150 struct be_dma_mem *mem = &q->dma_mem;
3152 memset(q, 0, sizeof(*q));
3154 q->entry_size = entry_size;
3155 mem->size = len * entry_size;
3159 memset(mem->va, 0, mem->size);
/* Allocate DMA memory for, and ask the adapter to create, one event queue
 * per CPU (plus one extra for MCC when MSI-X is enabled). On any failure
 * the EQs created so far are freed.
 */
3163 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3164 struct hwi_context_memory *phwi_context)
3166 unsigned int i, num_eq_pages;
3167 int ret = 0, eq_for_mcc;
3168 struct be_queue_info *eq;
3169 struct be_dma_mem *mem;
3173 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
3174 sizeof(struct be_eq_entry));
/* MSI-X gets a dedicated EQ for the MCC rings. */
3176 if (phba->msix_enabled)
3180 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3181 eq = &phwi_context->be_eq[i].q;
3183 phwi_context->be_eq[i].phba = phba;
3184 eq_vaddress = pci_alloc_consistent(phba->pcidev,
3185 num_eq_pages * PAGE_SIZE,
3188 goto create_eq_error;
3190 mem->va = eq_vaddress;
3191 ret = be_fill_queue(eq, phba->params.num_eq_entries,
3192 sizeof(struct be_eq_entry), eq_vaddress);
3194 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3195 "BM_%d : be_fill_queue Failed for EQ\n");
3196 goto create_eq_error;
3200 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3201 phwi_context->cur_eqd);
3203 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3204 "BM_%d : beiscsi_cmd_eq_create"
3206 goto create_eq_error;
3209 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3210 "BM_%d : eqid = %d\n",
3211 phwi_context->be_eq[i].q.id);
/* Error path: free the DMA memory of every EQ allocated so far. */
3215 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3216 eq = &phwi_context->be_eq[i].q;
3219 pci_free_consistent(phba->pcidev, num_eq_pages
/* Allocate DMA memory for, and create, one completion queue per CPU, each
 * bound to the matching per-CPU EQ. On failure, CQs allocated so far are
 * freed.
 */
3226 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3227 struct hwi_context_memory *phwi_context)
3229 unsigned int i, num_cq_pages;
3231 struct be_queue_info *cq, *eq;
3232 struct be_dma_mem *mem;
3233 struct be_eq_obj *pbe_eq;
3237 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
3238 sizeof(struct sol_cqe));
3240 for (i = 0; i < phba->num_cpus; i++) {
3241 cq = &phwi_context->be_cq[i];
3242 eq = &phwi_context->be_eq[i].q;
3243 pbe_eq = &phwi_context->be_eq[i];
3245 pbe_eq->phba = phba;
3247 cq_vaddress = pci_alloc_consistent(phba->pcidev,
3248 num_cq_pages * PAGE_SIZE,
3251 goto create_cq_error;
3252 ret = be_fill_queue(cq, phba->params.num_cq_entries,
3253 sizeof(struct sol_cqe), cq_vaddress);
3255 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3256 "BM_%d : be_fill_queue Failed "
3258 goto create_cq_error;
3262 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3265 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3266 "BM_%d : beiscsi_cmd_eq_create"
3267 "Failed for ISCSI CQ\n");
3268 goto create_cq_error;
3270 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3271 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3272 "iSCSI CQ CREATED\n", cq->id, eq->id);
/* Error path: free the DMA memory of every CQ allocated so far. */
3277 for (i = 0; i < phba->num_cpus; i++) {
3278 cq = &phwi_context->be_cq[i];
3281 pci_free_consistent(phba->pcidev, num_cq_pages
/* Create the default PDU *header* ring for one ULP from its pre-allocated
 * HWI_MEM_ASYNC_HEADER_RING region, then post initial async header buffers.
 */
3290 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3291 struct hwi_context_memory *phwi_context,
3292 struct hwi_controller *phwi_ctrlr,
3293 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3297 struct be_queue_info *dq, *cq;
3298 struct be_dma_mem *mem;
3299 struct be_mem_descriptor *mem_descr;
3303 dq = &phwi_context->be_def_hdrq[ulp_num];
/* Header ring completions land on the first CQ. */
3304 cq = &phwi_context->be_cq[0];
3306 mem_descr = phba->init_mem;
3307 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3308 (ulp_num * MEM_DESCR_OFFSET);
3309 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3310 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3311 sizeof(struct phys_addr),
3312 sizeof(struct phys_addr), dq_vaddress);
3314 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3315 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3320 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3321 bus_address.u.a64.address;
3322 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3324 phba->params.defpdu_hdr_sz,
3325 BEISCSI_DEFQ_HDR, ulp_num);
3327 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3328 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3334 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3335 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3337 phwi_context->be_def_hdrq[ulp_num].id);
3338 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
/* Create the default PDU *data* ring for one ULP from its pre-allocated
 * HWI_MEM_ASYNC_DATA_RING region, then post initial async data buffers.
 * Mirrors beiscsi_create_def_hdr() for the data side.
 */
3343 beiscsi_create_def_data(struct beiscsi_hba *phba,
3344 struct hwi_context_memory *phwi_context,
3345 struct hwi_controller *phwi_ctrlr,
3346 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3350 struct be_queue_info *dataq, *cq;
3351 struct be_dma_mem *mem;
3352 struct be_mem_descriptor *mem_descr;
3356 dataq = &phwi_context->be_def_dataq[ulp_num];
3357 cq = &phwi_context->be_cq[0];
3358 mem = &dataq->dma_mem;
3359 mem_descr = phba->init_mem;
3360 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3361 (ulp_num * MEM_DESCR_OFFSET);
3362 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3363 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3364 sizeof(struct phys_addr),
3365 sizeof(struct phys_addr), dq_vaddress);
3367 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3368 "BM_%d : be_fill_queue Failed for DEF PDU "
3369 "DATA on ULP : %d\n",
3374 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3375 bus_address.u.a64.address;
3376 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3378 phba->params.defpdu_data_sz,
3379 BEISCSI_DEFQ_DATA, ulp_num);
3381 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3382 "BM_%d be_cmd_create_default_pdu_queue"
3383 " Failed for DEF PDU DATA on ULP : %d\n",
3388 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3389 "BM_%d : iscsi def data id on ULP : %d is %d\n",
3391 phwi_context->be_def_dataq[ulp_num].id);
3393 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
3394 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3395 "BM_%d : DEFAULT PDU DATA RING CREATED"
3396 "on ULP : %d\n", ulp_num);
/* Post every fragment of the HWI_MEM_TEMPLATE_HDR region to the adapter as
 * template header pages, one firmware command per fragment.
 */
3403 beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3405 struct be_mem_descriptor *mem_descr;
3406 struct mem_array *pm_arr;
3407 struct be_dma_mem sgl;
3410 mem_descr = phba->init_mem;
3411 mem_descr += HWI_MEM_TEMPLATE_HDR;
3412 pm_arr = mem_descr->mem_array;
3414 for (i = 0; i < mem_descr->num_elements; i++) {
3415 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3416 status = be_cmd_iscsi_post_template_hdr(&phba->ctrl, &sgl);
3419 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3420 "BM_%d : Post Template HDR Failed\n");
3425 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3426 "BM_%d : Template HDR Pages Posted\n");
/* Post the HWI_MEM_SGE region to the adapter as SGL pages, fragment by
 * fragment, tracking the running page offset from iscsi_icd_start.
 */
3432 beiscsi_post_pages(struct beiscsi_hba *phba)
3434 struct be_mem_descriptor *mem_descr;
3435 struct mem_array *pm_arr;
3436 unsigned int page_offset, i;
3437 struct be_dma_mem sgl;
3438 int status, ulp_num = 0;
3440 mem_descr = phba->init_mem;
3441 mem_descr += HWI_MEM_SGE;
3442 pm_arr = mem_descr->mem_array;
/* Starting page offset derived from the first ULP's ICD start index. */
3444 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3445 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
3446 for (i = 0; i < mem_descr->num_elements; i++) {
3447 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3448 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3450 (pm_arr->size / PAGE_SIZE));
3451 page_offset += pm_arr->size / PAGE_SIZE;
3453 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3454 "BM_%d : post sgl failed.\n");
3459 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3460 "BM_%d : POSTED PAGES\n");
/* Release the DMA-coherent backing memory of a queue allocated with
 * be_queue_alloc().
 */
3464 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3466 struct be_dma_mem *mem = &q->dma_mem;
3468 pci_free_consistent(phba->pcidev, mem->size,
/* Allocate and zero DMA-coherent backing memory for a queue of @len entries
 * of @entry_size bytes, initializing the queue_info in the process.
 */
3474 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3475 u16 len, u16 entry_size)
3477 struct be_dma_mem *mem = &q->dma_mem;
3479 memset(q, 0, sizeof(*q));
3481 q->entry_size = entry_size;
3482 mem->size = len * entry_size;
3483 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
3486 memset(mem->va, 0, mem->size);
/* Slice the HWI_MEM_WRB region into one WRB ring per connection (advancing
 * to the next memory fragment when the current one runs out), then issue a
 * WRBQ-create command per connection and record the returned CID.
 * (Listing has elided lines; comments cover visible code only.)
 */
3491 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3492 struct hwi_context_memory *phwi_context,
3493 struct hwi_controller *phwi_ctrlr)
3495 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3497 unsigned int idx, num, i;
3498 struct mem_array *pwrb_arr;
3500 struct be_dma_mem sgl;
3501 struct be_mem_descriptor *mem_descr;
3502 struct hwi_wrb_context *pwrb_context;
3506 mem_descr = phba->init_mem;
3507 mem_descr += HWI_MEM_WRB;
3508 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3511 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3512 "BM_%d : Memory alloc failed in create wrb ring.\n");
3515 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3516 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3517 num_wrb_rings = mem_descr->mem_array[idx].size /
3518 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
/* Assign each connection a wrbs_per_cxn-sized slice of the region. */
3520 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3521 if (num_wrb_rings) {
3522 pwrb_arr[num].virtual_address = wrb_vaddr;
3523 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3524 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3525 sizeof(struct iscsi_wrb);
3526 wrb_vaddr += pwrb_arr[num].size;
3527 pa_addr_lo += pwrb_arr[num].size;
/* Fragment exhausted: restart the slicing from the next fragment. */
3531 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3532 pa_addr_lo = mem_descr->mem_array[idx].\
3533 bus_address.u.a64.address;
3534 num_wrb_rings = mem_descr->mem_array[idx].size /
3535 (phba->params.wrbs_per_cxn *
3536 sizeof(struct iscsi_wrb));
3537 pwrb_arr[num].virtual_address = wrb_vaddr;
3538 pwrb_arr[num].bus_address.u.a64.address\
3540 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3541 sizeof(struct iscsi_wrb);
3542 wrb_vaddr += pwrb_arr[num].size;
3543 pa_addr_lo += pwrb_arr[num].size;
/* Create one WRBQ per connection and map queue id -> CID/CRI. */
3547 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3552 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3553 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3554 &phwi_context->be_wrbq[i]);
3556 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3557 "BM_%d : wrbq create failed.");
3561 pwrb_context = &phwi_ctrlr->wrb_context[i];
3562 pwrb_context->cid = phwi_context->be_wrbq[i].id;
3563 BE_SET_CID_TO_CRI(i, pwrb_context->cid);
/* Free the per-connection WRB handle pointer tables allocated in
 * beiscsi_init_wrb_handle().
 */
3569 static void free_wrb_handles(struct beiscsi_hba *phba)
3572 struct hwi_controller *phwi_ctrlr;
3573 struct hwi_wrb_context *pwrb_context;
3575 phwi_ctrlr = phba->phwi_ctrlr;
3576 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
3577 pwrb_context = &phwi_ctrlr->wrb_context[index];
3578 kfree(pwrb_context->pwrb_handle_base);
3579 kfree(pwrb_context->pwrb_handle_basestd);
/* Tear down the MCC queue and then its completion queue — reverse of the
 * creation order in be_mcc_queues_create().
 */
3583 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3585 struct be_queue_info *q;
3586 struct be_ctrl_info *ctrl = &phba->ctrl;
3588 q = &phba->ctrl.mcc_obj.q;
3590 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3591 be_queue_free(phba, q);
3593 q = &phba->ctrl.mcc_obj.cq;
3595 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3596 be_queue_free(phba, q);
/* Full teardown of adapter-side queues, in reverse of creation order:
 * template header, WRBQs + handles, per-ULP default PDU rings, SGL, CQs,
 * EQs, MCC rings, then firmware uninit.
 */
3599 static void hwi_cleanup(struct beiscsi_hba *phba)
3601 struct be_queue_info *q;
3602 struct be_ctrl_info *ctrl = &phba->ctrl;
3603 struct hwi_controller *phwi_ctrlr;
3604 struct hwi_context_memory *phwi_context;
3605 struct hwi_async_pdu_context *pasync_ctx;
3606 int i, eq_num, ulp_num;
3608 phwi_ctrlr = phba->phwi_ctrlr;
3609 phwi_context = phwi_ctrlr->phwi_ctxt;
3611 be_cmd_iscsi_remove_template_hdr(ctrl);
3613 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3614 q = &phwi_context->be_wrbq[i];
3616 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3618 kfree(phwi_context->be_wrbq);
3619 free_wrb_handles(phba);
/* Default PDU header/data rings exist only for supported ULPs. */
3621 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3622 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3624 q = &phwi_context->be_def_hdrq[ulp_num];
3626 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3628 q = &phwi_context->be_def_dataq[ulp_num];
3630 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3632 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
3636 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3638 for (i = 0; i < (phba->num_cpus); i++) {
3639 q = &phwi_context->be_cq[i];
3641 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
/* Extra EQ to destroy when MSI-X added a dedicated MCC EQ. */
3643 if (phba->msix_enabled)
3647 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3648 q = &phwi_context->be_eq[i].q;
3650 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3652 be_mcc_queues_destroy(phba);
3653 be_cmd_fw_uninit(ctrl);
3656 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3657 struct hwi_context_memory *phwi_context)
3659 struct be_queue_info *q, *cq;
3660 struct be_ctrl_info *ctrl = &phba->ctrl;
3662 /* Alloc MCC compl queue */
3663 cq = &phba->ctrl.mcc_obj.cq;
3664 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3665 sizeof(struct be_mcc_compl)))
3667 /* Ask BE to create MCC compl queue; */
3668 if (phba->msix_enabled) {
3669 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3670 [phba->num_cpus].q, false, true, 0))
3673 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3678 /* Alloc MCC queue */
3679 q = &phba->ctrl.mcc_obj.q;
3680 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3681 goto mcc_cq_destroy;
3683 /* Ask BE to create MCC queue */
3684 if (beiscsi_cmd_mccq_create(phba, q, cq))
3690 be_queue_free(phba, q);
3692 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3694 be_queue_free(phba, cq);
3700 * find_num_cpus()- Get the CPU online count
3701 * @phba: ptr to priv structure
3703 * CPU count is used for creating EQ.
3705 static void find_num_cpus(struct beiscsi_hba *phba)
3709 num_cpus = num_online_cpus();
3711 switch (phba->generation) {
3714 phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
3715 BEISCSI_MAX_NUM_CPUS : num_cpus;
3718 phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
3719 OC_SKH_MAX_NUM_CPUS : num_cpus;
3726 static int hwi_init_port(struct beiscsi_hba *phba)
3728 struct hwi_controller *phwi_ctrlr;
3729 struct hwi_context_memory *phwi_context;
3730 unsigned int def_pdu_ring_sz;
3731 struct be_ctrl_info *ctrl = &phba->ctrl;
3732 int status, ulp_num;
3734 phwi_ctrlr = phba->phwi_ctrlr;
3735 phwi_context = phwi_ctrlr->phwi_ctxt;
3736 phwi_context->max_eqd = 0;
3737 phwi_context->min_eqd = 0;
3738 phwi_context->cur_eqd = 64;
3739 be_cmd_fw_initialize(&phba->ctrl);
3741 status = beiscsi_create_eqs(phba, phwi_context);
3743 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3744 "BM_%d : EQ not created\n");
3748 status = be_mcc_queues_create(phba, phwi_context);
3752 status = mgmt_check_supported_fw(ctrl, phba);
3754 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3755 "BM_%d : Unsupported fw version\n");
3759 status = beiscsi_create_cqs(phba, phwi_context);
3761 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3762 "BM_%d : CQ not created\n");
3766 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3767 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3770 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
3771 sizeof(struct phys_addr);
3773 status = beiscsi_create_def_hdr(phba, phwi_context,
3778 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3779 "BM_%d : Default Header not created for ULP : %d\n",
3784 status = beiscsi_create_def_data(phba, phwi_context,
3789 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3790 "BM_%d : Default Data not created for ULP : %d\n",
3797 status = beiscsi_post_pages(phba);
3799 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3800 "BM_%d : Post SGL Pages Failed\n");
3804 status = beiscsi_post_template_hdr(phba);
3806 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3807 "BM_%d : Template HDR Posting for CXN Failed\n");
3810 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3812 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3813 "BM_%d : WRB Rings not created\n");
3817 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3818 uint16_t async_arr_idx = 0;
3820 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3822 struct hwi_async_pdu_context *pasync_ctx;
3824 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3825 phwi_ctrlr, ulp_num);
3827 phba->params.cxns_per_ctrl; cri++) {
3828 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3830 pasync_ctx->cid_to_async_cri_map[
3831 phwi_ctrlr->wrb_context[cri].cid] =
3837 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3838 "BM_%d : hwi_init_port success\n");
3842 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3843 "BM_%d : hwi_init_port failed");
3848 static int hwi_init_controller(struct beiscsi_hba *phba)
3850 struct hwi_controller *phwi_ctrlr;
3852 phwi_ctrlr = phba->phwi_ctrlr;
3853 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3854 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3855 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3856 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3857 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3858 phwi_ctrlr->phwi_ctxt);
3860 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3861 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3862 "than one element.Failing to load\n");
3866 iscsi_init_global_templates(phba);
3867 if (beiscsi_init_wrb_handle(phba))
3870 if (hwi_init_async_pdu_ctx(phba)) {
3871 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3872 "BM_%d : hwi_init_async_pdu_ctx failed\n");
3876 if (hwi_init_port(phba) != 0) {
3877 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3878 "BM_%d : hwi_init_controller failed\n");
3885 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3887 struct be_mem_descriptor *mem_descr;
3890 mem_descr = phba->init_mem;
3893 for (i = 0; i < SE_MEM_MAX; i++) {
3894 for (j = mem_descr->num_elements; j > 0; j--) {
3895 pci_free_consistent(phba->pcidev,
3896 mem_descr->mem_array[j - 1].size,
3897 mem_descr->mem_array[j - 1].virtual_address,
3898 (unsigned long)mem_descr->mem_array[j - 1].
3899 bus_address.u.a64.address);
3902 kfree(mem_descr->mem_array);
3905 kfree(phba->init_mem);
3906 kfree(phba->phwi_ctrlr->wrb_context);
3907 kfree(phba->phwi_ctrlr);
3910 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3914 ret = beiscsi_get_memory(phba);
3916 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3917 "BM_%d : beiscsi_dev_probe -"
3918 "Failed in beiscsi_alloc_memory\n");
3922 ret = hwi_init_controller(phba);
3925 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3926 "BM_%d : Return success from beiscsi_init_controller");
3931 beiscsi_free_mem(phba);
3935 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3937 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3938 struct sgl_handle *psgl_handle;
3939 struct iscsi_sge *pfrag;
3940 unsigned int arr_index, i, idx, ulp_num = 0;
3942 phba->io_sgl_hndl_avbl = 0;
3943 phba->eh_sgl_hndl_avbl = 0;
3945 mem_descr_sglh = phba->init_mem;
3946 mem_descr_sglh += HWI_MEM_SGLH;
3947 if (1 == mem_descr_sglh->num_elements) {
3948 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3949 phba->params.ios_per_ctrl,
3951 if (!phba->io_sgl_hndl_base) {
3952 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3953 "BM_%d : Mem Alloc Failed. Failing to load\n");
3956 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3957 (phba->params.icds_per_ctrl -
3958 phba->params.ios_per_ctrl),
3960 if (!phba->eh_sgl_hndl_base) {
3961 kfree(phba->io_sgl_hndl_base);
3962 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3963 "BM_%d : Mem Alloc Failed. Failing to load\n");
3967 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3968 "BM_%d : HWI_MEM_SGLH is more than one element."
3969 "Failing to load\n");
3975 while (idx < mem_descr_sglh->num_elements) {
3976 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3978 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3979 sizeof(struct sgl_handle)); i++) {
3980 if (arr_index < phba->params.ios_per_ctrl) {
3981 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3982 phba->io_sgl_hndl_avbl++;
3985 phba->eh_sgl_hndl_base[arr_index -
3986 phba->params.ios_per_ctrl] =
3989 phba->eh_sgl_hndl_avbl++;
3995 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3996 "BM_%d : phba->io_sgl_hndl_avbl=%d"
3997 "phba->eh_sgl_hndl_avbl=%d\n",
3998 phba->io_sgl_hndl_avbl,
3999 phba->eh_sgl_hndl_avbl);
4001 mem_descr_sg = phba->init_mem;
4002 mem_descr_sg += HWI_MEM_SGE;
4003 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4004 "\n BM_%d : mem_descr_sg->num_elements=%d\n",
4005 mem_descr_sg->num_elements);
4009 while (idx < mem_descr_sg->num_elements) {
4010 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
4013 i < (mem_descr_sg->mem_array[idx].size) /
4014 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
4016 if (arr_index < phba->params.ios_per_ctrl)
4017 psgl_handle = phba->io_sgl_hndl_base[arr_index];
4019 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
4020 phba->params.ios_per_ctrl];
4021 psgl_handle->pfrag = pfrag;
4022 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
4023 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
4024 pfrag += phba->params.num_sge_per_io;
4025 psgl_handle->sgl_index =
4026 phba->fw_config.iscsi_icd_start[ulp_num] +
4031 phba->io_sgl_free_index = 0;
4032 phba->io_sgl_alloc_index = 0;
4033 phba->eh_sgl_free_index = 0;
4034 phba->eh_sgl_alloc_index = 0;
4038 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4042 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
4044 if (!phba->cid_array) {
4045 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4046 "BM_%d : Failed to allocate memory in "
4047 "hba_setup_cid_tbls\n");
4050 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
4051 phba->params.cxns_per_ctrl, GFP_KERNEL);
4052 if (!phba->ep_array) {
4053 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4054 "BM_%d : Failed to allocate memory in "
4055 "hba_setup_cid_tbls\n");
4056 kfree(phba->cid_array);
4057 phba->cid_array = NULL;
4061 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
4062 phba->params.cxns_per_ctrl, GFP_KERNEL);
4063 if (!phba->conn_table) {
4064 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4065 "BM_%d : Failed to allocate memory in"
4066 "hba_setup_cid_tbls\n");
4068 kfree(phba->cid_array);
4069 kfree(phba->ep_array);
4070 phba->cid_array = NULL;
4071 phba->ep_array = NULL;
4075 for (i = 0; i < phba->params.cxns_per_ctrl; i++)
4076 phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
4078 phba->avlbl_cids = phba->params.cxns_per_ctrl;
4082 static void hwi_enable_intr(struct beiscsi_hba *phba)
4084 struct be_ctrl_info *ctrl = &phba->ctrl;
4085 struct hwi_controller *phwi_ctrlr;
4086 struct hwi_context_memory *phwi_context;
4087 struct be_queue_info *eq;
4092 phwi_ctrlr = phba->phwi_ctrlr;
4093 phwi_context = phwi_ctrlr->phwi_ctxt;
4095 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4096 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
4097 reg = ioread32(addr);
4099 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4101 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4102 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4103 "BM_%d : reg =x%08x addr=%p\n", reg, addr);
4104 iowrite32(reg, addr);
4107 if (!phba->msix_enabled) {
4108 eq = &phwi_context->be_eq[0].q;
4109 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4110 "BM_%d : eq->id=%d\n", eq->id);
4112 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4114 for (i = 0; i <= phba->num_cpus; i++) {
4115 eq = &phwi_context->be_eq[i].q;
4116 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4117 "BM_%d : eq->id=%d\n", eq->id);
4118 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4123 static void hwi_disable_intr(struct beiscsi_hba *phba)
4125 struct be_ctrl_info *ctrl = &phba->ctrl;
4127 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4128 u32 reg = ioread32(addr);
4130 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4132 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4133 iowrite32(reg, addr);
4135 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4136 "BM_%d : In hwi_disable_intr, Already Disabled\n");
4140 * beiscsi_get_boot_info()- Get the boot session info
4141 * @phba: The device priv structure instance
4143 * Get the boot target info and store in driver priv structure
4147 * Failure: Non-Zero Value
4149 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
4151 struct be_cmd_get_session_resp *session_resp;
4152 struct be_dma_mem nonemb_cmd;
4154 unsigned int s_handle;
4157 /* Get the session handle of the boot target */
4158 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
4160 beiscsi_log(phba, KERN_ERR,
4161 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4162 "BM_%d : No boot session\n");
4165 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4166 sizeof(*session_resp),
4168 if (nonemb_cmd.va == NULL) {
4169 beiscsi_log(phba, KERN_ERR,
4170 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4171 "BM_%d : Failed to allocate memory for"
4172 "beiscsi_get_session_info\n");
4177 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
4178 tag = mgmt_get_session_info(phba, s_handle,
4181 beiscsi_log(phba, KERN_ERR,
4182 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4183 "BM_%d : beiscsi_get_session_info"
4189 ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
4191 beiscsi_log(phba, KERN_ERR,
4192 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4193 "BM_%d : beiscsi_get_session_info Failed");
4197 session_resp = nonemb_cmd.va ;
4199 memcpy(&phba->boot_sess, &session_resp->session_info,
4200 sizeof(struct mgmt_session_info));
4204 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4205 nonemb_cmd.va, nonemb_cmd.dma);
4209 static void beiscsi_boot_release(void *data)
4211 struct beiscsi_hba *phba = data;
4213 scsi_host_put(phba->shost);
4216 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
4218 struct iscsi_boot_kobj *boot_kobj;
4220 /* get boot info using mgmt cmd */
4221 if (beiscsi_get_boot_info(phba))
4222 /* Try to see if we can carry on without this */
4225 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
4226 if (!phba->boot_kset)
4229 /* get a ref because the show function will ref the phba */
4230 if (!scsi_host_get(phba->shost))
4232 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
4233 beiscsi_show_boot_tgt_info,
4234 beiscsi_tgt_get_attr_visibility,
4235 beiscsi_boot_release);
4239 if (!scsi_host_get(phba->shost))
4241 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
4242 beiscsi_show_boot_ini_info,
4243 beiscsi_ini_get_attr_visibility,
4244 beiscsi_boot_release);
4248 if (!scsi_host_get(phba->shost))
4250 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
4251 beiscsi_show_boot_eth_info,
4252 beiscsi_eth_get_attr_visibility,
4253 beiscsi_boot_release);
4259 scsi_host_put(phba->shost);
4261 iscsi_boot_destroy_kset(phba->boot_kset);
4265 static int beiscsi_init_port(struct beiscsi_hba *phba)
4269 ret = beiscsi_init_controller(phba);
4271 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4272 "BM_%d : beiscsi_dev_probe - Failed in"
4273 "beiscsi_init_controller\n");
4276 ret = beiscsi_init_sgl_handle(phba);
4278 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4279 "BM_%d : beiscsi_dev_probe - Failed in"
4280 "beiscsi_init_sgl_handle\n");
4281 goto do_cleanup_ctrlr;
4284 if (hba_setup_cid_tbls(phba)) {
4285 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4286 "BM_%d : Failed in hba_setup_cid_tbls\n");
4287 kfree(phba->io_sgl_hndl_base);
4288 kfree(phba->eh_sgl_hndl_base);
4289 goto do_cleanup_ctrlr;
4299 static void hwi_purge_eq(struct beiscsi_hba *phba)
4301 struct hwi_controller *phwi_ctrlr;
4302 struct hwi_context_memory *phwi_context;
4303 struct be_queue_info *eq;
4304 struct be_eq_entry *eqe = NULL;
4306 unsigned int num_processed;
4308 phwi_ctrlr = phba->phwi_ctrlr;
4309 phwi_context = phwi_ctrlr->phwi_ctxt;
4310 if (phba->msix_enabled)
4315 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
4316 eq = &phwi_context->be_eq[i].q;
4317 eqe = queue_tail_node(eq);
4319 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
4321 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
4323 eqe = queue_tail_node(eq);
4328 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
4332 static void beiscsi_clean_port(struct beiscsi_hba *phba)
4336 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
4338 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4339 "BM_%d : mgmt_epfw_cleanup FAILED\n");
4343 kfree(phba->io_sgl_hndl_base);
4344 kfree(phba->eh_sgl_hndl_base);
4345 kfree(phba->cid_array);
4346 kfree(phba->ep_array);
4347 kfree(phba->conn_table);
4351 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4352 * @beiscsi_conn: ptr to the conn to be cleaned up
4353 * @task: ptr to iscsi_task resource to be freed.
4355 * Free driver mgmt resources binded to CXN.
4358 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4359 struct iscsi_task *task)
4361 struct beiscsi_io_task *io_task;
4362 struct beiscsi_hba *phba = beiscsi_conn->phba;
4363 struct hwi_wrb_context *pwrb_context;
4364 struct hwi_controller *phwi_ctrlr;
4365 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4366 beiscsi_conn->beiscsi_conn_cid);
4368 phwi_ctrlr = phba->phwi_ctrlr;
4369 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4371 io_task = task->dd_data;
4373 if (io_task->pwrb_handle) {
4374 memset(io_task->pwrb_handle->pwrb, 0,
4375 sizeof(struct iscsi_wrb));
4376 free_wrb_handle(phba, pwrb_context,
4377 io_task->pwrb_handle);
4378 io_task->pwrb_handle = NULL;
4381 if (io_task->psgl_handle) {
4382 spin_lock_bh(&phba->mgmt_sgl_lock);
4383 free_mgmt_sgl_handle(phba,
4384 io_task->psgl_handle);
4385 io_task->psgl_handle = NULL;
4386 spin_unlock_bh(&phba->mgmt_sgl_lock);
4389 if (io_task->mtask_addr)
4390 pci_unmap_single(phba->pcidev,
4391 io_task->mtask_addr,
4392 io_task->mtask_data_count,
4397 * beiscsi_cleanup_task()- Free driver resources of the task
4398 * @task: ptr to the iscsi task
4401 static void beiscsi_cleanup_task(struct iscsi_task *task)
4403 struct beiscsi_io_task *io_task = task->dd_data;
4404 struct iscsi_conn *conn = task->conn;
4405 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4406 struct beiscsi_hba *phba = beiscsi_conn->phba;
4407 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4408 struct hwi_wrb_context *pwrb_context;
4409 struct hwi_controller *phwi_ctrlr;
4410 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4411 beiscsi_conn->beiscsi_conn_cid);
4413 phwi_ctrlr = phba->phwi_ctrlr;
4414 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4416 if (io_task->cmd_bhs) {
4417 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4418 io_task->bhs_pa.u.a64.address);
4419 io_task->cmd_bhs = NULL;
4423 if (io_task->pwrb_handle) {
4424 free_wrb_handle(phba, pwrb_context,
4425 io_task->pwrb_handle);
4426 io_task->pwrb_handle = NULL;
4429 if (io_task->psgl_handle) {
4430 spin_lock(&phba->io_sgl_lock);
4431 free_io_sgl_handle(phba, io_task->psgl_handle);
4432 spin_unlock(&phba->io_sgl_lock);
4433 io_task->psgl_handle = NULL;
4436 if (!beiscsi_conn->login_in_progress)
4437 beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
4442 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4443 struct beiscsi_offload_params *params)
4445 struct wrb_handle *pwrb_handle;
4446 struct beiscsi_hba *phba = beiscsi_conn->phba;
4447 struct iscsi_task *task = beiscsi_conn->task;
4448 struct iscsi_session *session = task->conn->session;
4452 * We can always use 0 here because it is reserved by libiscsi for
4453 * login/startup related tasks.
4455 beiscsi_conn->login_in_progress = 0;
4456 spin_lock_bh(&session->lock);
4457 beiscsi_cleanup_task(task);
4458 spin_unlock_bh(&session->lock);
4460 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
4462 /* Check for the adapter family */
4463 if (is_chip_be2_be3r(phba))
4464 beiscsi_offload_cxn_v0(params, pwrb_handle,
4467 beiscsi_offload_cxn_v2(params, pwrb_handle);
4469 be_dws_le_to_cpu(pwrb_handle->pwrb,
4470 sizeof(struct iscsi_target_context_update_wrb));
4472 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4473 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
4474 << DB_DEF_PDU_WRB_INDEX_SHIFT;
4475 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4477 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4480 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4481 int *index, int *age)
4485 *age = conn->session->age;
4489 * beiscsi_alloc_pdu - allocates pdu and related resources
4490 * @task: libiscsi task
4491 * @opcode: opcode of pdu for task
4493 * This is called with the session lock held. It will allocate
4494 * the wrb and sgl if needed for the command. And it will prep
4495 * the pdu's itt. beiscsi_parse_pdu will later translate
4496 * the pdu itt to the libiscsi task itt.
4498 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4500 struct beiscsi_io_task *io_task = task->dd_data;
4501 struct iscsi_conn *conn = task->conn;
4502 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4503 struct beiscsi_hba *phba = beiscsi_conn->phba;
4504 struct hwi_wrb_context *pwrb_context;
4505 struct hwi_controller *phwi_ctrlr;
4507 uint16_t cri_index = 0;
4508 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4511 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
4512 GFP_ATOMIC, &paddr);
4513 if (!io_task->cmd_bhs)
4515 io_task->bhs_pa.u.a64.address = paddr;
4516 io_task->libiscsi_itt = (itt_t)task->itt;
4517 io_task->conn = beiscsi_conn;
4519 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4520 task->hdr_max = sizeof(struct be_cmd_bhs);
4521 io_task->psgl_handle = NULL;
4522 io_task->pwrb_handle = NULL;
4525 spin_lock(&phba->io_sgl_lock);
4526 io_task->psgl_handle = alloc_io_sgl_handle(phba);
4527 spin_unlock(&phba->io_sgl_lock);
4528 if (!io_task->psgl_handle) {
4529 beiscsi_log(phba, KERN_ERR,
4530 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4531 "BM_%d : Alloc of IO_SGL_ICD Failed"
4532 "for the CID : %d\n",
4533 beiscsi_conn->beiscsi_conn_cid);
4536 io_task->pwrb_handle = alloc_wrb_handle(phba,
4537 beiscsi_conn->beiscsi_conn_cid);
4538 if (!io_task->pwrb_handle) {
4539 beiscsi_log(phba, KERN_ERR,
4540 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4541 "BM_%d : Alloc of WRB_HANDLE Failed"
4542 "for the CID : %d\n",
4543 beiscsi_conn->beiscsi_conn_cid);
4547 io_task->scsi_cmnd = NULL;
4548 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4549 beiscsi_conn->task = task;
4550 if (!beiscsi_conn->login_in_progress) {
4551 spin_lock(&phba->mgmt_sgl_lock);
4552 io_task->psgl_handle = (struct sgl_handle *)
4553 alloc_mgmt_sgl_handle(phba);
4554 spin_unlock(&phba->mgmt_sgl_lock);
4555 if (!io_task->psgl_handle) {
4556 beiscsi_log(phba, KERN_ERR,
4559 "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4560 "for the CID : %d\n",
4566 beiscsi_conn->login_in_progress = 1;
4567 beiscsi_conn->plogin_sgl_handle =
4568 io_task->psgl_handle;
4569 io_task->pwrb_handle =
4570 alloc_wrb_handle(phba,
4571 beiscsi_conn->beiscsi_conn_cid);
4572 if (!io_task->pwrb_handle) {
4573 beiscsi_log(phba, KERN_ERR,
4576 "BM_%d : Alloc of WRB_HANDLE Failed"
4577 "for the CID : %d\n",
4580 goto free_mgmt_hndls;
4582 beiscsi_conn->plogin_wrb_handle =
4583 io_task->pwrb_handle;
4586 io_task->psgl_handle =
4587 beiscsi_conn->plogin_sgl_handle;
4588 io_task->pwrb_handle =
4589 beiscsi_conn->plogin_wrb_handle;
4592 spin_lock(&phba->mgmt_sgl_lock);
4593 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4594 spin_unlock(&phba->mgmt_sgl_lock);
4595 if (!io_task->psgl_handle) {
4596 beiscsi_log(phba, KERN_ERR,
4599 "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4600 "for the CID : %d\n",
4605 io_task->pwrb_handle =
4606 alloc_wrb_handle(phba,
4607 beiscsi_conn->beiscsi_conn_cid);
4608 if (!io_task->pwrb_handle) {
4609 beiscsi_log(phba, KERN_ERR,
4610 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4611 "BM_%d : Alloc of WRB_HANDLE Failed"
4612 "for the CID : %d\n",
4613 beiscsi_conn->beiscsi_conn_cid);
4614 goto free_mgmt_hndls;
4619 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4620 wrb_index << 16) | (unsigned int)
4621 (io_task->psgl_handle->sgl_index));
4622 io_task->pwrb_handle->pio_handle = task;
4624 io_task->cmd_bhs->iscsi_hdr.itt = itt;
4628 spin_lock(&phba->io_sgl_lock);
4629 free_io_sgl_handle(phba, io_task->psgl_handle);
4630 spin_unlock(&phba->io_sgl_lock);
4633 spin_lock(&phba->mgmt_sgl_lock);
4634 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4635 io_task->psgl_handle = NULL;
4636 spin_unlock(&phba->mgmt_sgl_lock);
4638 phwi_ctrlr = phba->phwi_ctrlr;
4639 cri_index = BE_GET_CRI_FROM_CID(
4640 beiscsi_conn->beiscsi_conn_cid);
4641 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4642 if (io_task->pwrb_handle)
4643 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4644 io_task->pwrb_handle = NULL;
4645 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4646 io_task->bhs_pa.u.a64.address);
4647 io_task->cmd_bhs = NULL;
4650 int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4651 unsigned int num_sg, unsigned int xferlen,
4652 unsigned int writedir)
4655 struct beiscsi_io_task *io_task = task->dd_data;
4656 struct iscsi_conn *conn = task->conn;
4657 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4658 struct beiscsi_hba *phba = beiscsi_conn->phba;
4659 struct iscsi_wrb *pwrb = NULL;
4660 unsigned int doorbell = 0;
4662 pwrb = io_task->pwrb_handle->pwrb;
4664 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4665 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4668 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4670 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
4672 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4674 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
4677 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
4680 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
4681 cpu_to_be16(*(unsigned short *)
4682 &io_task->cmd_bhs->iscsi_hdr.lun));
4683 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
4684 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4685 io_task->pwrb_handle->wrb_index);
4686 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4687 be32_to_cpu(task->cmdsn));
4688 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4689 io_task->psgl_handle->sgl_index);
4691 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
4692 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4693 io_task->pwrb_handle->nxt_wrb_index);
4695 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4697 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4698 doorbell |= (io_task->pwrb_handle->wrb_index &
4699 DB_DEF_PDU_WRB_INDEX_MASK) <<
4700 DB_DEF_PDU_WRB_INDEX_SHIFT;
4701 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4702 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4706 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4707 unsigned int num_sg, unsigned int xferlen,
4708 unsigned int writedir)
4711 struct beiscsi_io_task *io_task = task->dd_data;
4712 struct iscsi_conn *conn = task->conn;
4713 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4714 struct beiscsi_hba *phba = beiscsi_conn->phba;
4715 struct iscsi_wrb *pwrb = NULL;
4716 unsigned int doorbell = 0;
4718 pwrb = io_task->pwrb_handle->pwrb;
4719 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4720 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4723 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4725 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
4727 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4729 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4732 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
4735 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
4736 cpu_to_be16(*(unsigned short *)
4737 &io_task->cmd_bhs->iscsi_hdr.lun));
4738 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4739 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4740 io_task->pwrb_handle->wrb_index);
4741 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4742 be32_to_cpu(task->cmdsn));
4743 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4744 io_task->psgl_handle->sgl_index);
4746 hwi_write_sgl(pwrb, sg, num_sg, io_task);
4748 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4749 io_task->pwrb_handle->nxt_wrb_index);
4750 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4752 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4753 doorbell |= (io_task->pwrb_handle->wrb_index &
4754 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4755 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4757 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4761 static int beiscsi_mtask(struct iscsi_task *task)
4763 struct beiscsi_io_task *io_task = task->dd_data;
4764 struct iscsi_conn *conn = task->conn;
4765 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4766 struct beiscsi_hba *phba = beiscsi_conn->phba;
4767 struct iscsi_wrb *pwrb = NULL;
4768 unsigned int doorbell = 0;
4770 unsigned int pwrb_typeoffset = 0;
4772 cid = beiscsi_conn->beiscsi_conn_cid;
4773 pwrb = io_task->pwrb_handle->pwrb;
4774 memset(pwrb, 0, sizeof(*pwrb));
4776 if (is_chip_be2_be3r(phba)) {
4777 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4778 be32_to_cpu(task->cmdsn));
4779 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4780 io_task->pwrb_handle->wrb_index);
4781 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4782 io_task->psgl_handle->sgl_index);
4783 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4785 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4786 io_task->pwrb_handle->nxt_wrb_index);
4787 pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
4789 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4790 be32_to_cpu(task->cmdsn));
4791 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4792 io_task->pwrb_handle->wrb_index);
4793 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4794 io_task->psgl_handle->sgl_index);
4795 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4797 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4798 io_task->pwrb_handle->nxt_wrb_index);
4799 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4803 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4804 case ISCSI_OP_LOGIN:
4805 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4806 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4807 hwi_write_buffer(pwrb, task);
4809 case ISCSI_OP_NOOP_OUT:
4810 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4811 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4812 if (is_chip_be2_be3r(phba))
4813 AMAP_SET_BITS(struct amap_iscsi_wrb,
4816 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4819 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
4820 if (is_chip_be2_be3r(phba))
4821 AMAP_SET_BITS(struct amap_iscsi_wrb,
4824 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4827 hwi_write_buffer(pwrb, task);
4830 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4831 hwi_write_buffer(pwrb, task);
4833 case ISCSI_OP_SCSI_TMFUNC:
4834 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
4835 hwi_write_buffer(pwrb, task);
4837 case ISCSI_OP_LOGOUT:
4838 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
4839 hwi_write_buffer(pwrb, task);
4843 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4844 "BM_%d : opcode =%d Not supported\n",
4845 task->hdr->opcode & ISCSI_OPCODE_MASK);
4850 /* Set the task type */
4851 io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
4852 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
4853 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
4855 doorbell |= cid & DB_WRB_POST_CID_MASK;
4856 doorbell |= (io_task->pwrb_handle->wrb_index &
4857 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4858 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4859 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4863 static int beiscsi_task_xmit(struct iscsi_task *task)
4865 struct beiscsi_io_task *io_task = task->dd_data;
4866 struct scsi_cmnd *sc = task->sc;
4867 struct beiscsi_hba *phba = NULL;
4868 struct scatterlist *sg;
4870 unsigned int writedir = 0, xferlen = 0;
4872 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
4875 return beiscsi_mtask(task);
4877 io_task->scsi_cmnd = sc;
4878 num_sg = scsi_dma_map(sc);
4880 struct iscsi_conn *conn = task->conn;
4881 struct beiscsi_hba *phba = NULL;
4883 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
4884 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
4885 "BM_%d : scsi_dma_map Failed\n");
4889 xferlen = scsi_bufflen(sc);
4890 sg = scsi_sglist(sc);
4891 if (sc->sc_data_direction == DMA_TO_DEVICE)
4896 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
4900 * beiscsi_bsg_request - handle bsg request from ISCSI transport
4901 * @job: job to handle
/*
 * Currently only ISCSI_BSG_HST_VENDOR is supported: a vendor-specific
 * firmware mailbox command is issued via the MCC queue, the reply
 * payload is copied back into the job's reply scatterlist, and the
 * job is completed with the MBX status as the result.
 */
4903 static int beiscsi_bsg_request(struct bsg_job *job)
4905 struct Scsi_Host *shost;
4906 struct beiscsi_hba *phba;
4907 struct iscsi_bsg_request *bsg_req = job->request;
4910 struct be_dma_mem nonemb_cmd;
4911 struct be_cmd_resp_hdr *resp;
4912 struct iscsi_bsg_reply *bsg_reply = job->reply;
4913 unsigned short status, extd_status;
4915 shost = iscsi_job_to_shost(job);
4916 phba = iscsi_host_priv(shost);
4918 switch (bsg_req->msgcode) {
4919 case ISCSI_BSG_HST_VENDOR:
/* DMA-coherent buffer for the non-embedded FW command payload. */
4920 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4921 job->request_payload.payload_len,
4923 if (nonemb_cmd.va == NULL) {
4924 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4925 "BM_%d : Failed to allocate memory for "
4926 "beiscsi_bsg_request\n");
/* Issue the command; a zero tag means no MCC tag was available,
 * in which case the DMA buffer is freed and we bail out. */
4929 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4932 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4933 "BM_%d : MBX Tag Allocation Failed\n");
4935 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4936 nonemb_cmd.va, nonemb_cmd.dma);
/* Wait (interruptible, bounded by BEISCSI_HOST_MBX_TIMEOUT) for the
 * MCC completion to post mcc_numtag[tag]. */
4940 rc = wait_event_interruptible_timeout(
4941 phba->ctrl.mcc_wait[tag],
4942 phba->ctrl.mcc_numtag[tag],
4944 BEISCSI_HOST_MBX_TIMEOUT));
/* mcc_numtag packs extended status in bits 15:8, status in 7:0. */
4945 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
4946 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
4947 free_mcc_tag(&phba->ctrl, tag);
4948 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
/* Copy the FW response (header + payload) into the bsg reply sglist. */
4949 sg_copy_from_buffer(job->reply_payload.sg_list,
4950 job->reply_payload.sg_cnt,
4951 nonemb_cmd.va, (resp->response_length
4953 bsg_reply->reply_payload_rcv_len = resp->response_length;
4954 bsg_reply->result = status;
4955 bsg_job_done(job, bsg_reply->result,
4956 bsg_reply->reply_payload_rcv_len);
4957 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4958 nonemb_cmd.va, nonemb_cmd.dma);
/* Non-zero status is logged but the job was already completed with
 * that status above. */
4959 if (status || extd_status) {
4960 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4961 " status = %d extd_status = %d\n",
4963 status, extd_status);
/* default: any other msgcode is rejected. */
4972 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4973 "BM_%d : Unsupported bsg command: 0x%x\n",
/*
 * beiscsi_hba_attrs_init - seed per-HBA configuration attributes from the
 * module parameters; called once from probe before the HBA is used.
 */
4981 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4983 /* Seed the logging-level attribute from the beiscsi_log_enable param */
4984 beiscsi_log_enable_init(phba, beiscsi_log_enable);
4988 * beiscsi_quiesce()- Cleanup Driver resources
4989 * @phba: Instance Priv structure
4991 * Free the OS and HW resources held by the driver
/*
 * Tear-down order: disable adapter interrupts, release IRQs (MSI-X or
 * legacy), stop the workqueue and iopoll handlers, free port/memory
 * resources, unmap BARs, free the mailbox DMA memory, and finally
 * cancel the periodic HW health-check work.
 */
4993 static void beiscsi_quiesce(struct beiscsi_hba *phba)
4995 struct hwi_controller *phwi_ctrlr;
4996 struct hwi_context_memory *phwi_context;
4997 struct be_eq_obj *pbe_eq;
4998 unsigned int i, msix_vec;
5000 phwi_ctrlr = phba->phwi_ctrlr;
5001 phwi_context = phwi_ctrlr->phwi_ctxt;
5002 hwi_disable_intr(phba);
/* num_cpus + 1 vectors: one EQ per CPU plus one for MCC (hence <=). */
5003 if (phba->msix_enabled) {
5004 for (i = 0; i <= phba->num_cpus; i++) {
5005 msix_vec = phba->msix_entries[i].vector;
5006 free_irq(msix_vec, &phwi_context->be_eq[i]);
5007 kfree(phba->msi_name[i]);
/* Legacy (INTx) path: single shared IRQ registered against phba. */
5010 if (phba->pcidev->irq)
5011 free_irq(phba->pcidev->irq, phba);
5012 pci_disable_msix(phba->pcidev);
5013 destroy_workqueue(phba->wq);
5014 if (blk_iopoll_enabled)
5015 for (i = 0; i < phba->num_cpus; i++) {
5016 pbe_eq = &phwi_context->be_eq[i];
5017 blk_iopoll_disable(&pbe_eq->iopoll);
5020 beiscsi_clean_port(phba);
5021 beiscsi_free_mem(phba);
5023 beiscsi_unmap_pci_function(phba);
/* Free the DMA-coherent mailbox memory allocated in be_ctrl_init(). */
5024 pci_free_consistent(phba->pcidev,
5025 phba->ctrl.mbox_mem_alloced.size,
5026 phba->ctrl.mbox_mem_alloced.va,
5027 phba->ctrl.mbox_mem_alloced.dma);
/* Synchronously stop the self-rearming health-check work item. */
5029 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
/*
 * beiscsi_remove - PCI .remove callback: undo everything probe set up.
 * Destroys the default ifaces, quiesces HW/driver resources, removes the
 * boot-sysfs kset and the SCSI host, then disables the PCI device.
 */
5032 static void beiscsi_remove(struct pci_dev *pcidev)
5035 struct beiscsi_hba *phba = NULL;
5037 phba = pci_get_drvdata(pcidev);
/* drvdata missing means probe never completed for this pdev. */
5039 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
5043 beiscsi_destroy_def_ifaces(phba);
5044 beiscsi_quiesce(phba);
5045 iscsi_boot_destroy_kset(phba->boot_kset);
5046 iscsi_host_remove(phba->shost);
/* Drop the reference taken on the pci_dev during probe. */
5047 pci_dev_put(phba->pcidev);
5048 iscsi_host_free(phba->shost);
5049 pci_disable_device(pcidev);
/*
 * beiscsi_shutdown - PCI .shutdown callback (reboot/halt path).
 * Quiesces the adapter and disables the PCI device; unlike remove(),
 * it does not tear down the SCSI host or sysfs objects.
 */
5052 static void beiscsi_shutdown(struct pci_dev *pcidev)
5055 struct beiscsi_hba *phba = NULL;
5057 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
5059 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
5063 beiscsi_quiesce(phba);
5064 pci_disable_device(pcidev);
/*
 * beiscsi_msix_enable - try to enable num_cpus + 1 MSI-X vectors
 * (one EQ per CPU plus one for MCC).  On success phba->msix_enabled is
 * set; on failure it is left unset and the driver falls back to INTx.
 */
5067 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5071 for (i = 0; i <= phba->num_cpus; i++)
5072 phba->msix_entries[i].entry = i;
5074 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
5075 (phba->num_cpus + 1));
/* pci_enable_msix() returns 0 when all requested vectors were granted. */
5077 phba->msix_enabled = true;
5083 * beiscsi_hw_health_check()- Check adapter health
5084 * @work: work item to check HW health
5086 * Check if adapter in an unrecoverable state or not.
/*
 * Delayed work handler: polls for unrecoverable errors via
 * beiscsi_ue_detect() and re-arms itself every second.  First armed in
 * probe; cancelled in beiscsi_quiesce().
 */
5089 beiscsi_hw_health_check(struct work_struct *work)
5091 struct beiscsi_hba *phba =
5092 container_of(work, struct beiscsi_hba,
5093 beiscsi_hw_check_task.work);
5095 beiscsi_ue_detect(phba);
/* Self-rearm with a 1 s period. */
5097 schedule_delayed_work(&phba->beiscsi_hw_check_task,
5098 msecs_to_jiffies(1000));
/*
 * beiscsi_dev_probe - PCI .probe callback: bring up one Emulex iSCSI HBA.
 *
 * Sequence: enable PCI, allocate the HBA/shost, pick the chip generation
 * and iotask function from the PCI device id, enable MSI-X, init the
 * mailbox/control path, reset the function, read the FW config, init the
 * port, set up the MCC tag pool, workqueue, iopoll and per-EQ work items,
 * request IRQs, enable interrupts, then publish boot info and default
 * ifaces and start the periodic health check.  Error-label cleanup at the
 * bottom unwinds in reverse order.  Returns 0 on success, negative errno
 * otherwise (error-return lines elided in this extract).
 */
5101 static int beiscsi_dev_probe(struct pci_dev *pcidev,
5102 const struct pci_device_id *id)
5104 struct beiscsi_hba *phba = NULL;
5105 struct hwi_controller *phwi_ctrlr;
5106 struct hwi_context_memory *phwi_context;
5107 struct be_eq_obj *pbe_eq;
5110 ret = beiscsi_enable_pci(pcidev);
5112 dev_err(&pcidev->dev,
5113 "beiscsi_dev_probe - Failed to enable pci device\n");
5117 phba = beiscsi_hba_alloc(pcidev);
5119 dev_err(&pcidev->dev,
5120 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
5124 /* Initialize driver configuration parameters (logging level etc.) */
5125 beiscsi_hba_attrs_init(phba);
5127 phba->fw_timeout = false;
5128 phba->mac_addr_set = false;
/* Select ASIC generation and the matching I/O task routine by PCI
 * device id (case labels elided in this extract). */
5131 switch (pcidev->device) {
5135 phba->generation = BE_GEN2;
5136 phba->iotask_fn = beiscsi_iotask;
5140 phba->generation = BE_GEN3;
5141 phba->iotask_fn = beiscsi_iotask;
5144 phba->generation = BE_GEN4;
5145 phba->iotask_fn = beiscsi_iotask_v2;
5148 phba->generation = 0;
5152 find_num_cpus(phba);
5156 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5157 "BM_%d : num_cpus = %d\n",
/* Fall back to a single CPU/EQ if MSI-X could not be enabled
 * (fallback statement elided in this extract). */
5161 beiscsi_msix_enable(phba);
5162 if (!phba->msix_enabled)
/* Mailbox/control structures, incl. DMA-coherent mbox memory. */
5165 ret = be_ctrl_init(phba, pcidev);
5167 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5168 "BM_%d : beiscsi_dev_probe-"
5169 "Failed in be_ctrl_init\n");
5173 ret = beiscsi_cmd_reset_function(phba);
5175 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5176 "BM_%d : Reset Failed\n");
5179 ret = be_chk_reset_complete(phba);
5181 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5182 "BM_%d : Failed to get out of reset.\n");
5186 spin_lock_init(&phba->io_sgl_lock);
5187 spin_lock_init(&phba->mgmt_sgl_lock);
5188 spin_lock_init(&phba->isr_lock);
5189 spin_lock_init(&phba->async_pdu_lock);
5190 ret = mgmt_get_fw_config(&phba->ctrl, phba);
5192 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5193 "BM_%d : Error getting fw config\n");
/* Size the SCSI host from the firmware-reported limits. */
5196 phba->shost->max_id = phba->params.cxns_per_ctrl;
5197 beiscsi_get_params(phba);
5198 phba->shost->can_queue = phba->params.ios_per_ctrl;
5199 ret = beiscsi_init_port(phba);
5201 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5202 "BM_%d : beiscsi_dev_probe-"
5203 "Failed in beiscsi_init_port\n");
/* MCC tag pool: tags are 1-based; index 0 is never handed out. */
5207 for (i = 0; i < MAX_MCC_CMD ; i++) {
5208 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5209 phba->ctrl.mcc_tag[i] = i + 1;
5210 phba->ctrl.mcc_numtag[i + 1] = 0;
5211 phba->ctrl.mcc_tag_available++;
5214 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
/* Single-threaded, mem-reclaim-safe workqueue named per host. */
5216 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
5217 phba->shost->host_no);
5218 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
5220 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5221 "BM_%d : beiscsi_dev_probe-"
5222 "Failed to allocate work queue\n");
5226 INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
5227 beiscsi_hw_health_check);
5229 phwi_ctrlr = phba->phwi_ctrlr;
5230 phwi_context = phwi_ctrlr->phwi_ctxt;
/* Per-CPU EQ polling via blk-iopoll when available. */
5232 if (blk_iopoll_enabled) {
5233 for (i = 0; i < phba->num_cpus; i++) {
5234 pbe_eq = &phwi_context->be_eq[i];
5235 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
5237 blk_iopoll_enable(&pbe_eq->iopoll);
/* With MSI-X the last EQ (index num_cpus) handles MCC events. */
5240 i = (phba->msix_enabled) ? i : 0;
5241 /* Work item for MCC handling */
5242 pbe_eq = &phwi_context->be_eq[i];
5243 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
/* Without blk-iopoll, every EQ gets a CQ-processing work item. */
5245 if (phba->msix_enabled) {
5246 for (i = 0; i <= phba->num_cpus; i++) {
5247 pbe_eq = &phwi_context->be_eq[i];
5248 INIT_WORK(&pbe_eq->work_cqs,
5249 beiscsi_process_all_cqs);
5252 pbe_eq = &phwi_context->be_eq[0];
5253 INIT_WORK(&pbe_eq->work_cqs,
5254 beiscsi_process_all_cqs);
5258 ret = beiscsi_init_irqs(phba);
5260 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5261 "BM_%d : beiscsi_dev_probe-"
5262 "Failed to beiscsi_init_irqs\n");
5265 hwi_enable_intr(phba);
5267 if (beiscsi_setup_boot_info(phba))
5269 * log error but continue, because we may not be using
5272 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5273 "BM_%d : Could not set up "
5274 "iSCSI boot info.\n");
5276 beiscsi_create_def_ifaces(phba);
/* Kick off the 1 s periodic adapter health check. */
5277 schedule_delayed_work(&phba->beiscsi_hw_check_task,
5278 msecs_to_jiffies(1000));
5280 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5281 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
/* Error-unwind labels: free in reverse order of acquisition
 * (label names elided in this extract). */
5285 destroy_workqueue(phba->wq);
5286 if (blk_iopoll_enabled)
5287 for (i = 0; i < phba->num_cpus; i++) {
5288 pbe_eq = &phwi_context->be_eq[i];
5289 blk_iopoll_disable(&pbe_eq->iopoll);
5292 beiscsi_clean_port(phba);
5293 beiscsi_free_mem(phba);
5295 pci_free_consistent(phba->pcidev,
5296 phba->ctrl.mbox_mem_alloced.size,
5297 phba->ctrl.mbox_mem_alloced.va,
5298 phba->ctrl.mbox_mem_alloced.dma);
5299 beiscsi_unmap_pci_function(phba);
5301 if (phba->msix_enabled)
5302 pci_disable_msix(phba->pcidev);
5303 iscsi_host_remove(phba->shost);
5304 pci_dev_put(phba->pcidev);
5305 iscsi_host_free(phba->shost);
5307 pci_disable_device(pcidev);
/*
 * iSCSI transport template registered with libiscsi at module init.
 * Mixes driver-specific callbacks (session/conn create, ep_connect,
 * task xmit) with generic libiscsi helpers (iscsi_conn_teardown,
 * iscsi_conn_stop, iscsi_conn_send_pdu, ...).
 */
5311 struct iscsi_transport beiscsi_iscsi_transport = {
5312 .owner = THIS_MODULE,
/* Offload capabilities advertised to the iSCSI midlayer. */
5314 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
5315 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
5316 .create_session = beiscsi_session_create,
5317 .destroy_session = beiscsi_session_destroy,
5318 .create_conn = beiscsi_conn_create,
5319 .bind_conn = beiscsi_conn_bind,
5320 .destroy_conn = iscsi_conn_teardown,
5321 .attr_is_visible = be2iscsi_attr_is_visible,
5322 .set_iface_param = be2iscsi_iface_set_param,
5323 .get_iface_param = be2iscsi_iface_get_param,
5324 .set_param = beiscsi_set_param,
5325 .get_conn_param = iscsi_conn_get_param,
5326 .get_session_param = iscsi_session_get_param,
5327 .get_host_param = beiscsi_get_host_param,
5328 .start_conn = beiscsi_conn_start,
5329 .stop_conn = iscsi_conn_stop,
5330 .send_pdu = iscsi_conn_send_pdu,
5331 .xmit_task = beiscsi_task_xmit,
5332 .cleanup_task = beiscsi_cleanup_task,
5333 .alloc_pdu = beiscsi_alloc_pdu,
5334 .parse_pdu_itt = beiscsi_parse_pdu,
5335 .get_stats = beiscsi_conn_get_stats,
5336 .get_ep_param = beiscsi_ep_get_param,
5337 .ep_connect = beiscsi_ep_connect,
5338 .ep_poll = beiscsi_ep_poll,
5339 .ep_disconnect = beiscsi_ep_disconnect,
5340 .session_recovery_timedout = iscsi_session_recovery_timedout,
5341 .bsg_request = beiscsi_bsg_request,
/* PCI driver glue: matched against beiscsi_pci_id_table at module init. */
5344 static struct pci_driver beiscsi_pci_driver = {
5346 .probe = beiscsi_dev_probe,
5347 .remove = beiscsi_remove,
5348 .shutdown = beiscsi_shutdown,
5349 .id_table = beiscsi_pci_id_table
/*
 * beiscsi_module_init - module entry: register the iSCSI transport
 * template first, then the PCI driver.  If PCI registration fails the
 * transport is unregistered again via the goto label below.
 */
5353 static int __init beiscsi_module_init(void)
5357 beiscsi_scsi_transport =
5358 iscsi_register_transport(&beiscsi_iscsi_transport);
5359 if (!beiscsi_scsi_transport) {
5361 "beiscsi_module_init - Unable to register beiscsi transport.\n");
5364 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
5365 &beiscsi_iscsi_transport);
5367 ret = pci_register_driver(&beiscsi_pci_driver);
5370 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
5371 goto unregister_iscsi_transport;
/* Unwind: undo the transport registration on PCI failure. */
5375 unregister_iscsi_transport:
5376 iscsi_unregister_transport(&beiscsi_iscsi_transport);
/*
 * beiscsi_module_exit - module exit: unregister in reverse order of
 * beiscsi_module_init (PCI driver first, then the iSCSI transport).
 */
5380 static void __exit beiscsi_module_exit(void)
5382 pci_unregister_driver(&beiscsi_pci_driver);
5383 iscsi_unregister_transport(&beiscsi_iscsi_transport);
5386 module_init(beiscsi_module_init);
5387 module_exit(beiscsi_module_exit);