[SCSI] be2iscsi: Fix return value and typo.
[linux-2.6-block.git] drivers/scsi/be2iscsi/be_main.c
/**
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");

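/*
 * beiscsi_disp_param/_change/_store/_init generate, per attribute, the sysfs
 * show handler, a range-checked runtime update helper, the sysfs store
 * handler and the module-load initializer (which falls back to the default
 * when the given value is out of range). BEISCSI_RW_ATTR() stamps out all
 * four plus the module parameter and DEVICE_ATTR in one go.
 */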
#define beiscsi_disp_param(_name)\
ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = 0;	\
	param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable.
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events : 0x01\n"
		"\t\t\t\tMailbox Events : 0x02\n"
		"\t\t\t\tMiscellaneous Events : 0x04\n"
		"\t\t\t\tError Handling : 0x08\n"
		"\t\t\t\tIO Path Events : 0x10\n"
		"\t\t\t\tConfiguration Path : 0x20\n");

DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

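/**
 * beiscsi_slave_configure - per-device setup at scan time
 * @sdev: the SCSI device being configured
 *
 * Caps the request queue segment size at 64 KB, presumably to match what a
 * single hardware SGE can address.
 */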
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

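/**
 * beiscsi_eh_abort - SCSI mid-layer abort handler
 * @sc: command to be aborted
 *
 * Builds a one-entry invalidate table for the command's ICD, asks the
 * firmware to invalidate it via mgmt_invalidate_icds(), waits for the MCC
 * completion and then lets iscsi_eh_abort() perform the protocol-level abort.
 */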
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be "
			    "submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}

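/**
 * beiscsi_eh_device_reset - SCSI mid-layer LUN reset handler
 * @sc: a command addressed to the LUN being reset
 *
 * Collects every active task on the same LUN into the invalidate table,
 * submits a single mgmt_invalidate_icds() request for the batch and finally
 * calls iscsi_eh_device_reset().
 */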
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->lock);
		return FAILED;
	}
	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
}

353static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
354{
355 struct beiscsi_hba *phba = data;
f457a46f
MC
356 struct mgmt_session_info *boot_sess = &phba->boot_sess;
357 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
c7acc5b8
JK
358 char *str = buf;
359 int rc;
360
361 switch (type) {
362 case ISCSI_BOOT_TGT_NAME:
363 rc = sprintf(buf, "%.*s\n",
f457a46f
MC
364 (int)strlen(boot_sess->target_name),
365 (char *)&boot_sess->target_name);
c7acc5b8
JK
366 break;
367 case ISCSI_BOOT_TGT_IP_ADDR:
f457a46f 368 if (boot_conn->dest_ipaddr.ip_type == 0x1)
c7acc5b8 369 rc = sprintf(buf, "%pI4\n",
0e43895e 370 (char *)&boot_conn->dest_ipaddr.addr);
c7acc5b8
JK
371 else
372 rc = sprintf(str, "%pI6\n",
0e43895e 373 (char *)&boot_conn->dest_ipaddr.addr);
c7acc5b8
JK
374 break;
375 case ISCSI_BOOT_TGT_PORT:
f457a46f 376 rc = sprintf(str, "%d\n", boot_conn->dest_port);
c7acc5b8
JK
377 break;
378
379 case ISCSI_BOOT_TGT_CHAP_NAME:
380 rc = sprintf(str, "%.*s\n",
f457a46f
MC
381 boot_conn->negotiated_login_options.auth_data.chap.
382 target_chap_name_length,
383 (char *)&boot_conn->negotiated_login_options.
384 auth_data.chap.target_chap_name);
c7acc5b8
JK
385 break;
386 case ISCSI_BOOT_TGT_CHAP_SECRET:
387 rc = sprintf(str, "%.*s\n",
f457a46f
MC
388 boot_conn->negotiated_login_options.auth_data.chap.
389 target_secret_length,
390 (char *)&boot_conn->negotiated_login_options.
391 auth_data.chap.target_secret);
c7acc5b8
JK
392 break;
393 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
394 rc = sprintf(str, "%.*s\n",
f457a46f
MC
395 boot_conn->negotiated_login_options.auth_data.chap.
396 intr_chap_name_length,
397 (char *)&boot_conn->negotiated_login_options.
398 auth_data.chap.intr_chap_name);
c7acc5b8
JK
399 break;
400 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
f457a46f
MC
401 rc = sprintf(str, "%.*s\n",
402 boot_conn->negotiated_login_options.auth_data.chap.
403 intr_secret_length,
404 (char *)&boot_conn->negotiated_login_options.
405 auth_data.chap.intr_secret);
c7acc5b8
JK
406 break;
407 case ISCSI_BOOT_TGT_FLAGS:
f457a46f 408 rc = sprintf(str, "2\n");
c7acc5b8
JK
409 break;
410 case ISCSI_BOOT_TGT_NIC_ASSOC:
f457a46f 411 rc = sprintf(str, "0\n");
c7acc5b8
JK
412 break;
413 default:
414 rc = -ENOSYS;
415 break;
416 }
417 return rc;
418}
419
420static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
421{
422 struct beiscsi_hba *phba = data;
423 char *str = buf;
424 int rc;
425
426 switch (type) {
427 case ISCSI_BOOT_INI_INITIATOR_NAME:
428 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
429 break;
430 default:
431 rc = -ENOSYS;
432 break;
433 }
434 return rc;
435}
436
437static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
438{
439 struct beiscsi_hba *phba = data;
440 char *str = buf;
441 int rc;
442
443 switch (type) {
444 case ISCSI_BOOT_ETH_FLAGS:
f457a46f 445 rc = sprintf(str, "2\n");
c7acc5b8
JK
446 break;
447 case ISCSI_BOOT_ETH_INDEX:
f457a46f 448 rc = sprintf(str, "0\n");
c7acc5b8
JK
449 break;
450 case ISCSI_BOOT_ETH_MAC:
0e43895e
MC
451 rc = beiscsi_get_macaddr(str, phba);
452 break;
c7acc5b8
JK
453 default:
454 rc = -ENOSYS;
455 break;
456 }
457 return rc;
458}
459
460
587a1f16 461static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
c7acc5b8 462{
587a1f16 463 umode_t rc;
c7acc5b8
JK
464
465 switch (type) {
466 case ISCSI_BOOT_TGT_NAME:
467 case ISCSI_BOOT_TGT_IP_ADDR:
468 case ISCSI_BOOT_TGT_PORT:
469 case ISCSI_BOOT_TGT_CHAP_NAME:
470 case ISCSI_BOOT_TGT_CHAP_SECRET:
471 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
472 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
473 case ISCSI_BOOT_TGT_NIC_ASSOC:
474 case ISCSI_BOOT_TGT_FLAGS:
475 rc = S_IRUGO;
476 break;
477 default:
478 rc = 0;
479 break;
480 }
481 return rc;
482}
483
587a1f16 484static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
c7acc5b8 485{
587a1f16 486 umode_t rc;
c7acc5b8
JK
487
488 switch (type) {
489 case ISCSI_BOOT_INI_INITIATOR_NAME:
490 rc = S_IRUGO;
491 break;
492 default:
493 rc = 0;
494 break;
495 }
496 return rc;
497}
498
499
587a1f16 500static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
c7acc5b8 501{
587a1f16 502 umode_t rc;
c7acc5b8
JK
503
504 switch (type) {
505 case ISCSI_BOOT_ETH_FLAGS:
506 case ISCSI_BOOT_ETH_MAC:
507 case ISCSI_BOOT_ETH_INDEX:
508 rc = S_IRUGO;
509 break;
510 default:
511 rc = 0;
512 break;
513 }
514 return rc;
515}
516
bfead3b2
JK
517/*------------------- PCI Driver operations and data ----------------- */
518static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
519 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
f98c96b0 520 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
bfead3b2
JK
521 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
522 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
523 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
bfead3b2
JK
524 { 0 }
525};
526MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
527
99bc5d55 528
6733b39a
JK
529static struct scsi_host_template beiscsi_sht = {
530 .module = THIS_MODULE,
2f635883 531 .name = "Emulex 10Gbe open-iscsi Initiator Driver",
6733b39a
JK
532 .proc_name = DRV_NAME,
533 .queuecommand = iscsi_queuecommand,
6733b39a
JK
534 .change_queue_depth = iscsi_change_queue_depth,
535 .slave_configure = beiscsi_slave_configure,
536 .target_alloc = iscsi_target_alloc,
4183122d
JK
537 .eh_abort_handler = beiscsi_eh_abort,
538 .eh_device_reset_handler = beiscsi_eh_device_reset,
309ce156 539 .eh_target_reset_handler = iscsi_eh_session_reset,
99bc5d55 540 .shost_attrs = beiscsi_attrs,
6733b39a
JK
541 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
542 .can_queue = BE2_IO_DEPTH,
543 .this_id = -1,
544 .max_sectors = BEISCSI_MAX_SECTORS,
545 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
546 .use_clustering = ENABLE_CLUSTERING,
ffce3e2e
JK
547 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
548
6733b39a 549};
6733b39a 550
bfead3b2 551static struct scsi_transport_template *beiscsi_scsi_transport;
6733b39a
JK
552
553static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
554{
555 struct beiscsi_hba *phba;
556 struct Scsi_Host *shost;
557
558 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
559 if (!shost) {
99bc5d55
JSJ
560 dev_err(&pcidev->dev,
561 "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
6733b39a
JK
562 return NULL;
563 }
564 shost->dma_boundary = pcidev->dma_mask;
565 shost->max_id = BE2_MAX_SESSIONS;
566 shost->max_channel = 0;
567 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
568 shost->max_lun = BEISCSI_NUM_MAX_LUN;
569 shost->transportt = beiscsi_scsi_transport;
6733b39a
JK
570 phba = iscsi_host_priv(shost);
571 memset(phba, 0, sizeof(*phba));
572 phba->shost = shost;
573 phba->pcidev = pci_dev_get(pcidev);
2807afb7 574 pci_set_drvdata(pcidev, phba);
0e43895e 575 phba->interface_handle = 0xFFFFFFFF;
6733b39a
JK
576
577 if (iscsi_host_add(shost, &phba->pcidev->dev))
578 goto free_devices;
c7acc5b8 579
6733b39a
JK
580 return phba;
581
582free_devices:
583 pci_dev_put(phba->pcidev);
584 iscsi_host_free(phba->shost);
585 return NULL;
586}
587
588static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
589{
590 if (phba->csr_va) {
591 iounmap(phba->csr_va);
592 phba->csr_va = NULL;
593 }
594 if (phba->db_va) {
595 iounmap(phba->db_va);
596 phba->db_va = NULL;
597 }
598 if (phba->pci_va) {
599 iounmap(phba->pci_va);
600 phba->pci_va = NULL;
601 }
602}
603
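/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and config BARs
 * @phba: device priv
 * @pcidev: PCI function being set up
 *
 * BAR 2 holds the CSR block and BAR 4 the doorbells; the PCI config shadow
 * is BAR 1 on BE Gen2 controllers and BAR 0 otherwise.
 */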
604static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
605 struct pci_dev *pcidev)
606{
607 u8 __iomem *addr;
f98c96b0 608 int pcicfg_reg;
6733b39a
JK
609
610 addr = ioremap_nocache(pci_resource_start(pcidev, 2),
611 pci_resource_len(pcidev, 2));
612 if (addr == NULL)
613 return -ENOMEM;
614 phba->ctrl.csr = addr;
615 phba->csr_va = addr;
616 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
617
618 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
619 if (addr == NULL)
620 goto pci_map_err;
621 phba->ctrl.db = addr;
622 phba->db_va = addr;
623 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
624
f98c96b0
JK
625 if (phba->generation == BE_GEN2)
626 pcicfg_reg = 1;
627 else
628 pcicfg_reg = 0;
629
630 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
631 pci_resource_len(pcidev, pcicfg_reg));
632
6733b39a
JK
633 if (addr == NULL)
634 goto pci_map_err;
635 phba->ctrl.pcicfg = addr;
636 phba->pci_va = addr;
f98c96b0 637 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
6733b39a
JK
638 return 0;
639
640pci_map_err:
641 beiscsi_unmap_pci_function(phba);
642 return -ENOMEM;
643}
644
645static int beiscsi_enable_pci(struct pci_dev *pcidev)
646{
647 int ret;
648
649 ret = pci_enable_device(pcidev);
650 if (ret) {
99bc5d55
JSJ
651 dev_err(&pcidev->dev,
652 "beiscsi_enable_pci - enable device failed\n");
6733b39a
JK
653 return ret;
654 }
655
bfead3b2 656 pci_set_master(pcidev);
6733b39a
JK
657 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
658 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
659 if (ret) {
660 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
661 pci_disable_device(pcidev);
662 return ret;
663 }
664 }
665 return 0;
666}
667
668static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
669{
670 struct be_ctrl_info *ctrl = &phba->ctrl;
671 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
672 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
673 int status = 0;
674
675 ctrl->pdev = pdev;
676 status = beiscsi_map_pci_bars(phba, pdev);
677 if (status)
678 return status;
6733b39a
JK
679 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
680 mbox_mem_alloc->va = pci_alloc_consistent(pdev,
681 mbox_mem_alloc->size,
682 &mbox_mem_alloc->dma);
683 if (!mbox_mem_alloc->va) {
684 beiscsi_unmap_pci_function(phba);
a49e06d5 685 return -ENOMEM;
6733b39a
JK
686 }
687
688 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
689 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
690 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
691 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
692 spin_lock_init(&ctrl->mbox_lock);
bfead3b2
JK
693 spin_lock_init(&phba->ctrl.mcc_lock);
694 spin_lock_init(&phba->ctrl.mcc_cq_lock);
695
6733b39a
JK
696 return status;
697}
698
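/**
 * beiscsi_get_params - derive driver limits from the firmware configuration
 * @phba: device priv
 *
 * Sizes the ICD/CID pools, default PDU buffers and EQ/CQ depths from
 * fw_config; queue depths are rounded up to a multiple of 512 and the EQ
 * depth is clamped to at least 1024 entries.
 */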
699static void beiscsi_get_params(struct beiscsi_hba *phba)
700{
7da50879
JK
701 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
702 - (phba->fw_config.iscsi_cid_count
703 + BE2_TMFS
704 + BE2_NOPOUT_REQ));
705 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
ed58ea2a 706 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
6eab04a8 707 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
6733b39a
JK
708 phba->params.num_sge_per_io = BE2_SGE;
709 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
710 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
711 phba->params.eq_timer = 64;
712 phba->params.num_eq_entries =
7da50879
JK
713 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
714 + BE2_TMFS) / 512) + 1) * 512;
6733b39a
JK
715 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
716 ? 1024 : phba->params.num_eq_entries;
99bc5d55
JSJ
717 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
718 "BM_%d : phba->params.num_eq_entries=%d\n",
719 phba->params.num_eq_entries);
6733b39a 720 phba->params.num_cq_entries =
7da50879
JK
721 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
722 + BE2_TMFS) / 512) + 1) * 512;
6733b39a
JK
723 phba->params.wrbs_per_cxn = 256;
724}
725
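/**
 * hwi_ring_eq_db - ring an event queue doorbell
 * @phba: device priv
 * @id: event queue id
 * @clr_interrupt: clear the interrupt bit
 * @num_processed: number of EQEs being returned to hardware
 * @rearm: re-arm the event queue
 * @event: the queue carries events
 */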
726static void hwi_ring_eq_db(struct beiscsi_hba *phba,
727 unsigned int id, unsigned int clr_interrupt,
728 unsigned int num_processed,
729 unsigned char rearm, unsigned char event)
730{
731 u32 val = 0;
732 val |= id & DB_EQ_RING_ID_MASK;
733 if (rearm)
734 val |= 1 << DB_EQ_REARM_SHIFT;
735 if (clr_interrupt)
736 val |= 1 << DB_EQ_CLR_SHIFT;
737 if (event)
738 val |= 1 << DB_EQ_EVNT_SHIFT;
739 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
740 iowrite32(val, phba->db_va + DB_EQ_OFFSET);
741}
742
bfead3b2
JK
743/**
744 * be_isr_mcc - The isr routine of the driver.
745 * @irq: Not used
746 * @dev_id: Pointer to host adapter structure
747 */
748static irqreturn_t be_isr_mcc(int irq, void *dev_id)
749{
750 struct beiscsi_hba *phba;
751 struct be_eq_entry *eqe = NULL;
752 struct be_queue_info *eq;
753 struct be_queue_info *mcc;
754 unsigned int num_eq_processed;
755 struct be_eq_obj *pbe_eq;
756 unsigned long flags;
757
758 pbe_eq = dev_id;
759 eq = &pbe_eq->q;
760 phba = pbe_eq->phba;
761 mcc = &phba->ctrl.mcc_obj.cq;
762 eqe = queue_tail_node(eq);
bfead3b2
JK
763
764 num_eq_processed = 0;
765
766 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
767 & EQE_VALID_MASK) {
768 if (((eqe->dw[offsetof(struct amap_eq_entry,
769 resource_id) / 32] &
770 EQE_RESID_MASK) >> 16) == mcc->id) {
771 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 772 pbe_eq->todo_mcc_cq = true;
bfead3b2
JK
773 spin_unlock_irqrestore(&phba->isr_lock, flags);
774 }
775 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
776 queue_tail_inc(eq);
777 eqe = queue_tail_node(eq);
778 num_eq_processed++;
779 }
72fb46a9
JSJ
780 if (pbe_eq->todo_mcc_cq)
781 queue_work(phba->wq, &pbe_eq->work_cqs);
bfead3b2
JK
782 if (num_eq_processed)
783 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
784
785 return IRQ_HANDLED;
786}
787
788/**
789 * be_isr_msix - The isr routine of the driver.
790 * @irq: Not used
791 * @dev_id: Pointer to host adapter structure
792 */
793static irqreturn_t be_isr_msix(int irq, void *dev_id)
794{
795 struct beiscsi_hba *phba;
796 struct be_eq_entry *eqe = NULL;
797 struct be_queue_info *eq;
798 struct be_queue_info *cq;
799 unsigned int num_eq_processed;
800 struct be_eq_obj *pbe_eq;
801 unsigned long flags;
802
803 pbe_eq = dev_id;
804 eq = &pbe_eq->q;
805 cq = pbe_eq->cq;
806 eqe = queue_tail_node(eq);
bfead3b2
JK
807
808 phba = pbe_eq->phba;
809 num_eq_processed = 0;
810 if (blk_iopoll_enabled) {
811 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
812 & EQE_VALID_MASK) {
813 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
814 blk_iopoll_sched(&pbe_eq->iopoll);
815
816 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
817 queue_tail_inc(eq);
818 eqe = queue_tail_node(eq);
819 num_eq_processed++;
820 }
bfead3b2
JK
821 } else {
822 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
823 & EQE_VALID_MASK) {
824 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 825 pbe_eq->todo_cq = true;
bfead3b2
JK
826 spin_unlock_irqrestore(&phba->isr_lock, flags);
827 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
828 queue_tail_inc(eq);
829 eqe = queue_tail_node(eq);
830 num_eq_processed++;
831 }
bfead3b2 832
72fb46a9
JSJ
833 if (pbe_eq->todo_cq)
834 queue_work(phba->wq, &pbe_eq->work_cqs);
bfead3b2 835 }
72fb46a9
JSJ
836
837 if (num_eq_processed)
838 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
839
840 return IRQ_HANDLED;
bfead3b2
JK
841}
842
6733b39a
JK
843/**
844 * be_isr - The isr routine of the driver.
845 * @irq: Not used
846 * @dev_id: Pointer to host adapter structure
847 */
848static irqreturn_t be_isr(int irq, void *dev_id)
849{
850 struct beiscsi_hba *phba;
851 struct hwi_controller *phwi_ctrlr;
852 struct hwi_context_memory *phwi_context;
853 struct be_eq_entry *eqe = NULL;
854 struct be_queue_info *eq;
855 struct be_queue_info *cq;
bfead3b2 856 struct be_queue_info *mcc;
6733b39a 857 unsigned long flags, index;
bfead3b2 858 unsigned int num_mcceq_processed, num_ioeq_processed;
6733b39a 859 struct be_ctrl_info *ctrl;
bfead3b2 860 struct be_eq_obj *pbe_eq;
6733b39a
JK
861 int isr;
862
863 phba = dev_id;
6eab04a8 864 ctrl = &phba->ctrl;
bfead3b2
JK
865 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
866 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
867 if (!isr)
868 return IRQ_NONE;
6733b39a
JK
869
870 phwi_ctrlr = phba->phwi_ctrlr;
871 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
872 pbe_eq = &phwi_context->be_eq[0];
873
874 eq = &phwi_context->be_eq[0].q;
875 mcc = &phba->ctrl.mcc_obj.cq;
6733b39a
JK
876 index = 0;
877 eqe = queue_tail_node(eq);
6733b39a 878
bfead3b2
JK
879 num_ioeq_processed = 0;
880 num_mcceq_processed = 0;
6733b39a
JK
881 if (blk_iopoll_enabled) {
882 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
883 & EQE_VALID_MASK) {
bfead3b2
JK
884 if (((eqe->dw[offsetof(struct amap_eq_entry,
885 resource_id) / 32] &
886 EQE_RESID_MASK) >> 16) == mcc->id) {
887 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 888 pbe_eq->todo_mcc_cq = true;
bfead3b2
JK
889 spin_unlock_irqrestore(&phba->isr_lock, flags);
890 num_mcceq_processed++;
891 } else {
892 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
893 blk_iopoll_sched(&pbe_eq->iopoll);
894 num_ioeq_processed++;
895 }
6733b39a
JK
896 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
897 queue_tail_inc(eq);
898 eqe = queue_tail_node(eq);
6733b39a 899 }
bfead3b2 900 if (num_ioeq_processed || num_mcceq_processed) {
72fb46a9
JSJ
901 if (pbe_eq->todo_mcc_cq)
902 queue_work(phba->wq, &pbe_eq->work_cqs);
bfead3b2 903
756d29c8 904 if ((num_mcceq_processed) && (!num_ioeq_processed))
bfead3b2
JK
905 hwi_ring_eq_db(phba, eq->id, 0,
906 (num_ioeq_processed +
907 num_mcceq_processed) , 1, 1);
908 else
909 hwi_ring_eq_db(phba, eq->id, 0,
910 (num_ioeq_processed +
911 num_mcceq_processed), 0, 1);
912
6733b39a
JK
913 return IRQ_HANDLED;
914 } else
915 return IRQ_NONE;
916 } else {
bfead3b2 917 cq = &phwi_context->be_cq[0];
6733b39a
JK
918 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
919 & EQE_VALID_MASK) {
920
921 if (((eqe->dw[offsetof(struct amap_eq_entry,
922 resource_id) / 32] &
923 EQE_RESID_MASK) >> 16) != cq->id) {
924 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 925 pbe_eq->todo_mcc_cq = true;
6733b39a
JK
926 spin_unlock_irqrestore(&phba->isr_lock, flags);
927 } else {
928 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 929 pbe_eq->todo_cq = true;
6733b39a
JK
930 spin_unlock_irqrestore(&phba->isr_lock, flags);
931 }
932 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
933 queue_tail_inc(eq);
934 eqe = queue_tail_node(eq);
bfead3b2 935 num_ioeq_processed++;
6733b39a 936 }
72fb46a9
JSJ
937 if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
938 queue_work(phba->wq, &pbe_eq->work_cqs);
6733b39a 939
bfead3b2
JK
940 if (num_ioeq_processed) {
941 hwi_ring_eq_db(phba, eq->id, 0,
942 num_ioeq_processed, 1, 1);
6733b39a
JK
943 return IRQ_HANDLED;
944 } else
945 return IRQ_NONE;
946 }
947}
948
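/**
 * beiscsi_init_irqs - register the driver's interrupt handlers
 * @phba: device priv
 *
 * With MSI-X enabled this requests one vector per CPU for the I/O event
 * queues plus one more for the MCC event queue; otherwise a single shared
 * INTx handler is used. Any vectors already registered are freed on failure.
 */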
949static int beiscsi_init_irqs(struct beiscsi_hba *phba)
950{
951 struct pci_dev *pcidev = phba->pcidev;
bfead3b2
JK
952 struct hwi_controller *phwi_ctrlr;
953 struct hwi_context_memory *phwi_context;
4f5af07e 954 int ret, msix_vec, i, j;
6733b39a 955
bfead3b2
JK
956 phwi_ctrlr = phba->phwi_ctrlr;
957 phwi_context = phwi_ctrlr->phwi_ctxt;
958
959 if (phba->msix_enabled) {
960 for (i = 0; i < phba->num_cpus; i++) {
8fcfb210
JK
961 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
962 GFP_KERNEL);
963 if (!phba->msi_name[i]) {
964 ret = -ENOMEM;
965 goto free_msix_irqs;
966 }
967
968 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
969 phba->shost->host_no, i);
bfead3b2 970 msix_vec = phba->msix_entries[i].vector;
8fcfb210
JK
971 ret = request_irq(msix_vec, be_isr_msix, 0,
972 phba->msi_name[i],
bfead3b2 973 &phwi_context->be_eq[i]);
4f5af07e 974 if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
8fcfb210 979 kfree(phba->msi_name[i]);
4f5af07e
JK
980 goto free_msix_irqs;
981 }
bfead3b2 982 }
8fcfb210
JK
983 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
984 if (!phba->msi_name[i]) {
985 ret = -ENOMEM;
986 goto free_msix_irqs;
987 }
988 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
989 phba->shost->host_no);
bfead3b2 990 msix_vec = phba->msix_entries[i].vector;
8fcfb210 991 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
bfead3b2 992 &phwi_context->be_eq[i]);
4f5af07e 993 if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
8fcfb210 997 kfree(phba->msi_name[i]);
4f5af07e
JK
998 goto free_msix_irqs;
999 }
1000
bfead3b2
JK
1001 } else {
1002 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
1003 "beiscsi", phba);
1004 if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
bfead3b2
JK
1008 return ret;
1009 }
6733b39a
JK
1010 }
1011 return 0;
4f5af07e 1012free_msix_irqs:
8fcfb210
JK
1013 for (j = i - 1; j >= 0; j--) {
1014 kfree(phba->msi_name[j]);
1015 msix_vec = phba->msix_entries[j].vector;
4f5af07e 1016 free_irq(msix_vec, &phwi_context->be_eq[j]);
8fcfb210 1017 }
4f5af07e 1018 return ret;
6733b39a
JK
1019}
1020
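/**
 * hwi_ring_cq_db - ring a completion queue doorbell
 * @phba: device priv
 * @id: completion queue id
 * @num_processed: number of CQEs being returned to hardware
 * @rearm: re-arm the completion queue
 * @event: unused here, kept for symmetry with hwi_ring_eq_db()
 */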
1021static void hwi_ring_cq_db(struct beiscsi_hba *phba,
1022 unsigned int id, unsigned int num_processed,
1023 unsigned char rearm, unsigned char event)
1024{
1025 u32 val = 0;
1026 val |= id & DB_CQ_RING_ID_MASK;
1027 if (rearm)
1028 val |= 1 << DB_CQ_REARM_SHIFT;
1029 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
1030 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
1031}
1032
1033static unsigned int
1034beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
1035 struct beiscsi_hba *phba,
1036 unsigned short cid,
1037 struct pdu_base *ppdu,
1038 unsigned long pdu_len,
1039 void *pbuffer, unsigned long buf_len)
1040{
1041 struct iscsi_conn *conn = beiscsi_conn->conn;
1042 struct iscsi_session *session = conn->session;
bfead3b2
JK
1043 struct iscsi_task *task;
1044 struct beiscsi_io_task *io_task;
1045 struct iscsi_hdr *login_hdr;
6733b39a
JK
1046
1047 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
1048 PDUBASE_OPCODE_MASK) {
1049 case ISCSI_OP_NOOP_IN:
1050 pbuffer = NULL;
1051 buf_len = 0;
1052 break;
1053 case ISCSI_OP_ASYNC_EVENT:
1054 break;
1055 case ISCSI_OP_REJECT:
1056 WARN_ON(!pbuffer);
1057 WARN_ON(!(buf_len == 48));
99bc5d55
JSJ
1058 beiscsi_log(phba, KERN_ERR,
1059 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1060 "BM_%d : In ISCSI_OP_REJECT\n");
6733b39a
JK
1061 break;
1062 case ISCSI_OP_LOGIN_RSP:
7bd6e25c 1063 case ISCSI_OP_TEXT_RSP:
bfead3b2
JK
1064 task = conn->login_task;
1065 io_task = task->dd_data;
1066 login_hdr = (struct iscsi_hdr *)ppdu;
1067 login_hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1068 break;
1069 default:
99bc5d55
JSJ
1070 beiscsi_log(phba, KERN_WARNING,
1071 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1072 "BM_%d : Unrecognized opcode 0x%x in async msg\n",
1073 (ppdu->
6733b39a 1074 dw[offsetof(struct amap_pdu_base, opcode) / 32]
99bc5d55 1075 & PDUBASE_OPCODE_MASK));
6733b39a
JK
1076 return 1;
1077 }
1078
1079 spin_lock_bh(&session->lock);
1080 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
1081 spin_unlock_bh(&session->lock);
1082 return 0;
1083}
1084
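/**
 * alloc_io_sgl_handle - pop a free SGL handle for an I/O task
 * @phba: device priv
 *
 * Returns NULL when the I/O SGL pool is exhausted; the allocation index
 * wraps at ios_per_ctrl. The pool itself is not locked here, callers are
 * expected to serialize access.
 */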
1085static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
1086{
1087 struct sgl_handle *psgl_handle;
1088
1089 if (phba->io_sgl_hndl_avbl) {
99bc5d55
JSJ
1090 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1091 "BM_%d : In alloc_io_sgl_handle,"
1092 " io_sgl_alloc_index=%d\n",
1093 phba->io_sgl_alloc_index);
1094
6733b39a
JK
1095 psgl_handle = phba->io_sgl_hndl_base[phba->
1096 io_sgl_alloc_index];
1097 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
1098 phba->io_sgl_hndl_avbl--;
bfead3b2
JK
1099 if (phba->io_sgl_alloc_index == (phba->params.
1100 ios_per_ctrl - 1))
6733b39a
JK
1101 phba->io_sgl_alloc_index = 0;
1102 else
1103 phba->io_sgl_alloc_index++;
1104 } else
1105 psgl_handle = NULL;
1106 return psgl_handle;
1107}
1108
1109static void
1110free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1111{
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);
1115
6733b39a
JK
1116 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
1117 /*
1118 * this can happen if clean_task is called on a task that
1119 * failed in xmit_task or alloc_pdu.
1120 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, "
			    "value there=%p\n", phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
6733b39a
JK
1126 return;
1127 }
1128 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
1129 phba->io_sgl_hndl_avbl++;
1130 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
1131 phba->io_sgl_free_index = 0;
1132 else
1133 phba->io_sgl_free_index++;
1134}
1135
1136/**
1137 * alloc_wrb_handle - To allocate a wrb handle
1138 * @phba: The hba pointer
1139 * @cid: The cid to use for allocation
6733b39a
JK
1140 *
1141 * This happens under session_lock until submission to chip
1142 */
d5431488 1143struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
6733b39a
JK
1144{
1145 struct hwi_wrb_context *pwrb_context;
1146 struct hwi_controller *phwi_ctrlr;
d5431488 1147 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
6733b39a
JK
1148
1149 phwi_ctrlr = phba->phwi_ctrlr;
1150 pwrb_context = &phwi_ctrlr->wrb_context[cid];
d5431488 1151 if (pwrb_context->wrb_handles_available >= 2) {
bfead3b2
JK
1152 pwrb_handle = pwrb_context->pwrb_handle_base[
1153 pwrb_context->alloc_index];
1154 pwrb_context->wrb_handles_available--;
bfead3b2
JK
1155 if (pwrb_context->alloc_index ==
1156 (phba->params.wrbs_per_cxn - 1))
1157 pwrb_context->alloc_index = 0;
1158 else
1159 pwrb_context->alloc_index++;
d5431488
JK
1160 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
1161 pwrb_context->alloc_index];
1162 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
bfead3b2
JK
1163 } else
1164 pwrb_handle = NULL;
6733b39a
JK
1165 return pwrb_handle;
1166}
1167
1168/**
1169 * free_wrb_handle - To free the wrb handle back to pool
1170 * @phba: The hba pointer
1171 * @pwrb_context: The context to free from
1172 * @pwrb_handle: The wrb_handle to free
1173 *
1174 * This happens under session_lock until submission to chip
1175 */
1176static void
1177free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1178 struct wrb_handle *pwrb_handle)
1179{
32951dd8 1180 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
bfead3b2
JK
1181 pwrb_context->wrb_handles_available++;
1182 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
1183 pwrb_context->free_index = 0;
1184 else
1185 pwrb_context->free_index++;
1186
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
6733b39a
JK
1193}
1194
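/**
 * alloc_mgmt_sgl_handle - pop a free SGL handle for a management task
 * @phba: device priv
 *
 * Same scheme as alloc_io_sgl_handle(), but drawing from the smaller pool
 * reserved for login/TMF/NOP traffic (icds_per_ctrl - ios_per_ctrl entries).
 */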
1195static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1196{
1197 struct sgl_handle *psgl_handle;
1198
1199 if (phba->eh_sgl_hndl_avbl) {
1200 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1201 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
99bc5d55
JSJ
1202 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1203 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
1204 phba->eh_sgl_alloc_index,
1205 phba->eh_sgl_alloc_index);
1206
6733b39a
JK
1207 phba->eh_sgl_hndl_avbl--;
1208 if (phba->eh_sgl_alloc_index ==
1209 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1210 1))
1211 phba->eh_sgl_alloc_index = 0;
1212 else
1213 phba->eh_sgl_alloc_index++;
1214 } else
1215 psgl_handle = NULL;
1216 return psgl_handle;
1217}
1218
1219void
1220free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1221{
1222
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, "
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);
1227
6733b39a
JK
1228 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1229 /*
1230 * this can happen if clean_task is called on a task that
1231 * failed in xmit_task or alloc_pdu.
1232 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
6733b39a
JK
1237 return;
1238 }
1239 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1240 phba->eh_sgl_hndl_avbl++;
1241 if (phba->eh_sgl_free_index ==
1242 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1243 phba->eh_sgl_free_index = 0;
1244 else
1245 phba->eh_sgl_free_index++;
1246}
1247
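/**
 * be_complete_io - turn a solicited CQE into a SCSI command completion
 * @beiscsi_conn: connection the completion arrived on
 * @task: iSCSI task being completed
 * @psol: the solicited completion entry
 *
 * Extracts status, residual count and, for CHECK CONDITION, the sense data
 * from the CQE, then completes the command via iscsi_complete_scsi_task().
 */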
1248static void
1249be_complete_io(struct beiscsi_conn *beiscsi_conn,
1250 struct iscsi_task *task, struct sol_cqe *psol)
1251{
1252 struct beiscsi_io_task *io_task = task->dd_data;
1253 struct be_status_bhs *sts_bhs =
1254 (struct be_status_bhs *)io_task->cmd_bhs;
1255 struct iscsi_conn *conn = beiscsi_conn->conn;
6733b39a
JK
1256 unsigned char *sense;
1257 u32 resid = 0, exp_cmdsn, max_cmdsn;
1258 u8 rsp, status, flags;
1259
bfead3b2 1260 exp_cmdsn = (psol->
6733b39a
JK
1261 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1262 & SOL_EXP_CMD_SN_MASK);
bfead3b2 1263 max_cmdsn = ((psol->
6733b39a
JK
1264 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1265 & SOL_EXP_CMD_SN_MASK) +
1266 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1267 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1268 rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
1269 & SOL_RESP_MASK) >> 16);
1270 status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
1271 & SOL_STS_MASK) >> 8);
1272 flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1273 & SOL_FLAGS_MASK) >> 24) | 0x80;
bd535451
JK
1274 if (!task->sc) {
1275 if (io_task->scsi_cmnd)
1276 scsi_dma_unmap(io_task->scsi_cmnd);
6733b39a 1277
bd535451
JK
1278 return;
1279 }
6733b39a
JK
1280 task->sc->result = (DID_OK << 16) | status;
1281 if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1282 task->sc->result = DID_ERROR << 16;
1283 goto unmap;
1284 }
1285
1286 /* bidi not initially supported */
1287 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
1288 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
1289 32] & SOL_RES_CNT_MASK);
1290
1291 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1292 task->sc->result = DID_ERROR << 16;
1293
1294 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1295 scsi_set_resid(task->sc, resid);
1296 if (!status && (scsi_bufflen(task->sc) - resid <
1297 task->sc->underflow))
1298 task->sc->result = DID_ERROR << 16;
1299 }
1300 }
1301
1302 if (status == SAM_STAT_CHECK_CONDITION) {
4053a4be 1303 u16 sense_len;
bfead3b2 1304 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
4053a4be 1305
6733b39a 1306 sense = sts_bhs->sense_info + sizeof(unsigned short);
4053a4be 1307 sense_len = be16_to_cpu(*slen);
6733b39a
JK
1308 memcpy(task->sc->sense_buffer, sense,
1309 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1310 }
756d29c8 1311
6733b39a
JK
1312 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
1313 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1314 & SOL_RES_CNT_MASK)
1315 conn->rxdata_octets += (psol->
bfead3b2
JK
1316 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1317 & SOL_RES_CNT_MASK);
6733b39a
JK
1318 }
1319unmap:
1320 scsi_dma_unmap(io_task->scsi_cmnd);
1321 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
1322}
1323
1324static void
1325be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1326 struct iscsi_task *task, struct sol_cqe *psol)
1327{
1328 struct iscsi_logout_rsp *hdr;
bfead3b2 1329 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1330 struct iscsi_conn *conn = beiscsi_conn->conn;
1331
1332 hdr = (struct iscsi_logout_rsp *)task->hdr;
7bd6e25c 1333 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
6733b39a
JK
1334 hdr->t2wait = 5;
1335 hdr->t2retain = 0;
1336 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1337 & SOL_FLAGS_MASK) >> 24) | 0x80;
1338 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
1339 32] & SOL_RESP_MASK);
1340 hdr->exp_cmdsn = cpu_to_be32(psol->
1341 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1342 & SOL_EXP_CMD_SN_MASK);
1343 hdr->max_cmdsn = be32_to_cpu((psol->
1344 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1345 & SOL_EXP_CMD_SN_MASK) +
1346 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1347 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
7bd6e25c
JK
1348 hdr->dlength[0] = 0;
1349 hdr->dlength[1] = 0;
1350 hdr->dlength[2] = 0;
6733b39a 1351 hdr->hlength = 0;
bfead3b2 1352 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1353 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1354}
1355
1356static void
1357be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1358 struct iscsi_task *task, struct sol_cqe *psol)
1359{
1360 struct iscsi_tm_rsp *hdr;
1361 struct iscsi_conn *conn = beiscsi_conn->conn;
bfead3b2 1362 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1363
1364 hdr = (struct iscsi_tm_rsp *)task->hdr;
7bd6e25c 1365 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
6733b39a
JK
1366 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1367 & SOL_FLAGS_MASK) >> 24) | 0x80;
1368 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
1369 32] & SOL_RESP_MASK);
1370 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
bfead3b2 1371 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
6733b39a
JK
1372 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1373 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1374 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1375 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
bfead3b2 1376 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1377 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1378}
1379
1380static void
1381hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1382 struct beiscsi_hba *phba, struct sol_cqe *psol)
1383{
1384 struct hwi_wrb_context *pwrb_context;
bfead3b2 1385 struct wrb_handle *pwrb_handle = NULL;
6733b39a 1386 struct hwi_controller *phwi_ctrlr;
bfead3b2
JK
1387 struct iscsi_task *task;
1388 struct beiscsi_io_task *io_task;
6733b39a
JK
1389 struct iscsi_conn *conn = beiscsi_conn->conn;
1390 struct iscsi_session *session = conn->session;
1391
1392 phwi_ctrlr = phba->phwi_ctrlr;
32951dd8 1393 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
35e66019 1394 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
7da50879
JK
1395 SOL_CID_MASK) >> 6) -
1396 phba->fw_config.iscsi_cid_start];
32951dd8 1397 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
35e66019
JK
1398 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1399 32] & SOL_WRB_INDEX_MASK) >> 16)];
32951dd8 1400 task = pwrb_handle->pio_handle;
35e66019 1401
bfead3b2 1402 io_task = task->dd_data;
1282ab76 1403 spin_lock_bh(&phba->mgmt_sgl_lock);
bfead3b2 1404 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1282ab76 1405 spin_unlock_bh(&phba->mgmt_sgl_lock);
6733b39a
JK
1406 spin_lock_bh(&session->lock);
1407 free_wrb_handle(phba, pwrb_context, pwrb_handle);
1408 spin_unlock_bh(&session->lock);
1409}
1410
1411static void
1412be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1413 struct iscsi_task *task, struct sol_cqe *psol)
1414{
1415 struct iscsi_nopin *hdr;
1416 struct iscsi_conn *conn = beiscsi_conn->conn;
bfead3b2 1417 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1418
1419 hdr = (struct iscsi_nopin *)task->hdr;
1420 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1421 & SOL_FLAGS_MASK) >> 24) | 0x80;
1422 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1423 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1424 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1425 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1426 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1427 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1428 hdr->opcode = ISCSI_OP_NOOP_IN;
bfead3b2 1429 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1430 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1431}
1432
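/**
 * hwi_complete_cmd - dispatch a solicited completion by WRB type
 * @beiscsi_conn: connection the completion arrived on
 * @phba: device priv
 * @psol: the solicited completion entry
 *
 * Looks up the WRB handle referenced by the CQE and, based on the type
 * stamped into the WRB at submission, routes the completion to the I/O,
 * logout/TMF or NOP-In handler.
 */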
1433static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1434 struct beiscsi_hba *phba, struct sol_cqe *psol)
1435{
1436 struct hwi_wrb_context *pwrb_context;
1437 struct wrb_handle *pwrb_handle;
1438 struct iscsi_wrb *pwrb = NULL;
1439 struct hwi_controller *phwi_ctrlr;
1440 struct iscsi_task *task;
bfead3b2 1441 unsigned int type;
6733b39a
JK
1442 struct iscsi_conn *conn = beiscsi_conn->conn;
1443 struct iscsi_session *session = conn->session;
1444
1445 phwi_ctrlr = phba->phwi_ctrlr;
32951dd8 1446 pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
35e66019 1447 (struct amap_sol_cqe, cid) / 32]
7da50879
JK
1448 & SOL_CID_MASK) >> 6) -
1449 phba->fw_config.iscsi_cid_start];
32951dd8 1450 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
35e66019
JK
1451 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1452 32] & SOL_WRB_INDEX_MASK) >> 16)];
32951dd8
JK
1453 task = pwrb_handle->pio_handle;
1454 pwrb = pwrb_handle->pwrb;
1455 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1456 WRB_TYPE_MASK) >> 28;
1457
bfead3b2
JK
1458 spin_lock_bh(&session->lock);
1459 switch (type) {
6733b39a
JK
1460 case HWH_TYPE_IO:
1461 case HWH_TYPE_IO_RD:
1462 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
dafab8e0 1463 ISCSI_OP_NOOP_OUT)
6733b39a 1464 be_complete_nopin_resp(beiscsi_conn, task, psol);
dafab8e0 1465 else
6733b39a
JK
1466 be_complete_io(beiscsi_conn, task, psol);
1467 break;
1468
1469 case HWH_TYPE_LOGOUT:
dafab8e0
JK
1470 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1471 be_complete_logout(beiscsi_conn, task, psol);
1472 else
1473 be_complete_tmf(beiscsi_conn, task, psol);
1474
6733b39a
JK
1475 break;
1476
1477 case HWH_TYPE_LOGIN:
99bc5d55
JSJ
1478 beiscsi_log(phba, KERN_ERR,
1479 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1480 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
1481 " hwi_complete_cmd- Solicited path\n");
6733b39a
JK
1482 break;
1483
6733b39a
JK
1484 case HWH_TYPE_NOP:
1485 be_complete_nopin_resp(beiscsi_conn, task, psol);
1486 break;
1487
1488 default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    ((psol->dw[offsetof(struct amap_iscsi_wrb,
			    type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
			    ((psol->dw[offsetof(struct amap_sol_cqe,
			    cid) / 32] & SOL_CID_MASK) >> 6));
6733b39a
JK
1497 break;
1498 }
35e66019 1499
6733b39a
JK
1500 spin_unlock_bh(&session->lock);
1501}
1502
1503static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1504 *pasync_ctx, unsigned int is_header,
1505 unsigned int host_write_ptr)
1506{
1507 if (is_header)
1508 return &pasync_ctx->async_entry[host_write_ptr].
1509 header_busy_list;
1510 else
1511 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1512}
1513
1514static struct async_pdu_handle *
1515hwi_get_async_handle(struct beiscsi_hba *phba,
1516 struct beiscsi_conn *beiscsi_conn,
1517 struct hwi_async_pdu_context *pasync_ctx,
1518 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1519{
1520 struct be_bus_address phys_addr;
1521 struct list_head *pbusy_list;
1522 struct async_pdu_handle *pasync_handle = NULL;
6733b39a
JK
1523 unsigned char is_header = 0;
1524
1525 phys_addr.u.a32.address_lo =
1526 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1527 ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1528 & PDUCQE_DPL_MASK) >> 16);
1529 phys_addr.u.a32.address_hi =
1530 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1531
1532 phys_addr.u.a64.address =
1533 *((unsigned long long *)(&phys_addr.u.a64.address));
1534
1535 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1536 & PDUCQE_CODE_MASK) {
1537 case UNSOL_HDR_NOTIFY:
1538 is_header = 1;
1539
1540 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1541 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1542 index) / 32] & PDUCQE_INDEX_MASK));
6733b39a
JK
1543 break;
1544 case UNSOL_DATA_NOTIFY:
1545 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1546 dw[offsetof(struct amap_i_t_dpdu_cqe,
1547 index) / 32] & PDUCQE_INDEX_MASK));
6733b39a
JK
1548 break;
1549 default:
1550 pbusy_list = NULL;
99bc5d55
JSJ
1551 beiscsi_log(phba, KERN_WARNING,
1552 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1553 "BM_%d : Unexpected code=%d\n",
1554 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1555 code) / 32] & PDUCQE_CODE_MASK);
6733b39a
JK
1556 return NULL;
1557 }
1558
6733b39a
JK
1559 WARN_ON(list_empty(pbusy_list));
1560 list_for_each_entry(pasync_handle, pbusy_list, link) {
dc63aac6 1561 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
6733b39a
JK
1562 break;
1563 }
1564
1565 WARN_ON(!pasync_handle);
1566
7da50879
JK
1567 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1568 phba->fw_config.iscsi_cid_start;
6733b39a
JK
1569 pasync_handle->is_header = is_header;
1570 pasync_handle->buffer_len = ((pdpdu_cqe->
1571 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1572 & PDUCQE_DPL_MASK) >> 16);
1573
1574 *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1575 index) / 32] & PDUCQE_INDEX_MASK);
1576 return pasync_handle;
1577}
1578
1579static unsigned int
99bc5d55
JSJ
1580hwi_update_async_writables(struct beiscsi_hba *phba,
1581 struct hwi_async_pdu_context *pasync_ctx,
1582 unsigned int is_header, unsigned int cq_index)
6733b39a
JK
1583{
1584 struct list_head *pbusy_list;
1585 struct async_pdu_handle *pasync_handle;
1586 unsigned int num_entries, writables = 0;
1587 unsigned int *pep_read_ptr, *pwritables;
1588
dc63aac6 1589 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1590 if (is_header) {
1591 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1592 pwritables = &pasync_ctx->async_header.writables;
6733b39a
JK
1593 } else {
1594 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1595 pwritables = &pasync_ctx->async_data.writables;
6733b39a
JK
1596 }
1597
1598 while ((*pep_read_ptr) != cq_index) {
1599 (*pep_read_ptr)++;
1600 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1601
1602 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1603 *pep_read_ptr);
1604 if (writables == 0)
1605 WARN_ON(list_empty(pbusy_list));
1606
1607 if (!list_empty(pbusy_list)) {
1608 pasync_handle = list_entry(pbusy_list->next,
1609 struct async_pdu_handle,
1610 link);
1611 WARN_ON(!pasync_handle);
1612 pasync_handle->consumed = 1;
1613 }
1614
1615 writables++;
1616 }
1617
1618 if (!writables) {
99bc5d55
JSJ
1619 beiscsi_log(phba, KERN_ERR,
1620 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1621 "BM_%d : Duplicate notification received - index 0x%x!!\n",
1622 cq_index);
6733b39a
JK
1623 WARN_ON(1);
1624 }
1625
1626 *pwritables = *pwritables + writables;
1627 return 0;
1628}
1629
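/**
 * hwi_free_async_msg - return a CRI's queued async handles to the free lists
 * @phba: device priv
 * @cri: connection resource index whose wait queue is drained
 */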
9728d8d0 1630static void hwi_free_async_msg(struct beiscsi_hba *phba,
6733b39a
JK
1631 unsigned int cri)
1632{
1633 struct hwi_controller *phwi_ctrlr;
1634 struct hwi_async_pdu_context *pasync_ctx;
1635 struct async_pdu_handle *pasync_handle, *tmp_handle;
1636 struct list_head *plist;
6733b39a
JK
1637
1638 phwi_ctrlr = phba->phwi_ctrlr;
1639 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1640
1641 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1642
1643 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1644 list_del(&pasync_handle->link);
1645
9728d8d0 1646 if (pasync_handle->is_header) {
6733b39a
JK
1647 list_add_tail(&pasync_handle->link,
1648 &pasync_ctx->async_header.free_list);
1649 pasync_ctx->async_header.free_entries++;
6733b39a
JK
1650 } else {
1651 list_add_tail(&pasync_handle->link,
1652 &pasync_ctx->async_data.free_list);
1653 pasync_ctx->async_data.free_entries++;
6733b39a
JK
1654 }
1655 }
1656
1657 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1658 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1659 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
6733b39a
JK
1660}
1661
1662static struct phys_addr *
1663hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1664 unsigned int is_header, unsigned int host_write_ptr)
1665{
1666 struct phys_addr *pasync_sge = NULL;
1667
1668 if (is_header)
1669 pasync_sge = pasync_ctx->async_header.ring_base;
1670 else
1671 pasync_sge = pasync_ctx->async_data.ring_base;
1672
1673 return pasync_sge + host_write_ptr;
1674}
1675
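/**
 * hwi_post_async_buffers - replenish a default PDU ring
 * @phba: device priv
 * @is_header: nonzero for the header ring, zero for the data ring
 *
 * Moves free handles back onto the busy lists in multiples of eight,
 * programs their addresses into the ring SGEs and rings the RXULP0
 * doorbell to hand the buffers to the hardware.
 */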
1676static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1677 unsigned int is_header)
1678{
1679 struct hwi_controller *phwi_ctrlr;
1680 struct hwi_async_pdu_context *pasync_ctx;
1681 struct async_pdu_handle *pasync_handle;
1682 struct list_head *pfree_link, *pbusy_list;
1683 struct phys_addr *pasync_sge;
1684 unsigned int ring_id, num_entries;
1685 unsigned int host_write_num;
1686 unsigned int writables;
1687 unsigned int i = 0;
1688 u32 doorbell = 0;
1689
1690 phwi_ctrlr = phba->phwi_ctrlr;
1691 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
dc63aac6 1692 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1693
1694 if (is_header) {
6733b39a
JK
1695 writables = min(pasync_ctx->async_header.writables,
1696 pasync_ctx->async_header.free_entries);
1697 pfree_link = pasync_ctx->async_header.free_list.next;
1698 host_write_num = pasync_ctx->async_header.host_write_ptr;
1699 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1700 } else {
6733b39a
JK
1701 writables = min(pasync_ctx->async_data.writables,
1702 pasync_ctx->async_data.free_entries);
1703 pfree_link = pasync_ctx->async_data.free_list.next;
1704 host_write_num = pasync_ctx->async_data.host_write_ptr;
1705 ring_id = phwi_ctrlr->default_pdu_data.id;
1706 }
1707
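/*
 * Buffers are posted to the default PDU ring in batches: round the
 * count down to a multiple of 8 and leave any remainder for a later
 * posting cycle.
 */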
1708 writables = (writables / 8) * 8;
1709 if (writables) {
1710 for (i = 0; i < writables; i++) {
1711 pbusy_list =
1712 hwi_get_async_busy_list(pasync_ctx, is_header,
1713 host_write_num);
1714 pasync_handle =
1715 list_entry(pfree_link, struct async_pdu_handle,
1716 link);
1717 WARN_ON(!pasync_handle);
1718 pasync_handle->consumed = 0;
1719
1720 pfree_link = pfree_link->next;
1721
1722 pasync_sge = hwi_get_ring_address(pasync_ctx,
1723 is_header, host_write_num);
1724
1725 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1726 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1727
1728 list_move(&pasync_handle->link, pbusy_list);
1729
1730 host_write_num++;
1731 host_write_num = host_write_num % num_entries;
1732 }
1733
1734 if (is_header) {
1735 pasync_ctx->async_header.host_write_ptr =
1736 host_write_num;
1737 pasync_ctx->async_header.free_entries -= writables;
1738 pasync_ctx->async_header.writables -= writables;
1739 pasync_ctx->async_header.busy_entries += writables;
1740 } else {
1741 pasync_ctx->async_data.host_write_ptr = host_write_num;
1742 pasync_ctx->async_data.free_entries -= writables;
1743 pasync_ctx->async_data.writables -= writables;
1744 pasync_ctx->async_data.busy_entries += writables;
1745 }
1746
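/*
 * Ring the default PDU doorbell: encode the ring id, request re-arm,
 * and report the number of entries just posted.
 */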
1747 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1748 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1749 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1750 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1751 << DB_DEF_PDU_CQPROC_SHIFT;
1752
1753 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1754 }
1755}
1756
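/**
 * hwi_flush_default_pdu_buffer()- Drop a default PDU data buffer
 * @phba: iscsi hba ptr
 * @beiscsi_conn: connection the completion belongs to
 * @pdpdu_cqe: default PDU CQE being flushed
 *
 * Used when an unsolicited data PDU has to be discarded (for example
 * on a data digest error). Only data-ring entries are expected here;
 * the handle for the completed entry is looked up, the writable count
 * is updated if the entry was not yet consumed, any partially
 * assembled message for this CRI is freed and the async buffers are
 * re-posted.
 **/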
1757static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1758 struct beiscsi_conn *beiscsi_conn,
1759 struct i_t_dpdu_cqe *pdpdu_cqe)
1760{
1761 struct hwi_controller *phwi_ctrlr;
1762 struct hwi_async_pdu_context *pasync_ctx;
1763 struct async_pdu_handle *pasync_handle = NULL;
1764 unsigned int cq_index = -1;
1765
1766 phwi_ctrlr = phba->phwi_ctrlr;
1767 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1768
1769 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1770 pdpdu_cqe, &cq_index);
1771 BUG_ON(pasync_handle->is_header != 0);
1772 if (pasync_handle->consumed == 0)
99bc5d55
JSJ
1773 hwi_update_async_writables(phba, pasync_ctx,
1774 pasync_handle->is_header, cq_index);
6733b39a
JK
1775
1776 hwi_free_async_msg(phba, pasync_handle->cri);
1777 hwi_post_async_buffers(phba, pasync_handle->is_header);
1778}
1779
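/**
 * hwi_fwd_async_msg()- Reassemble and process an async PDU
 * @beiscsi_conn: connection the PDU arrived on
 * @phba: iscsi hba ptr
 * @pasync_ctx: async pdu context
 * @cri: connection resource index whose wait queue is drained
 *
 * The first handle on the CRI's wait queue carries the PDU header;
 * the remaining handles carry data segments, which are copied
 * back-to-back into the first data buffer. The assembled PDU is then
 * passed to beiscsi_process_async_pdu() and the handles are returned
 * to the free lists via hwi_free_async_msg().
 **/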
1780static unsigned int
1781hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1782 struct beiscsi_hba *phba,
1783 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1784{
1785 struct list_head *plist;
1786 struct async_pdu_handle *pasync_handle;
1787 void *phdr = NULL;
1788 unsigned int hdr_len = 0, buf_len = 0;
1789 unsigned int status, index = 0, offset = 0;
1790 void *pfirst_buffer = NULL;
1791 unsigned int num_buf = 0;
1792
1793 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1794
1795 list_for_each_entry(pasync_handle, plist, link) {
1796 if (index == 0) {
1797 phdr = pasync_handle->pbuffer;
1798 hdr_len = pasync_handle->buffer_len;
1799 } else {
1800 buf_len = pasync_handle->buffer_len;
1801 if (!num_buf) {
1802 pfirst_buffer = pasync_handle->pbuffer;
1803 num_buf++;
1804 }
1805 memcpy(pfirst_buffer + offset,
1806 pasync_handle->pbuffer, buf_len);
f2ba02b8 1807 offset += buf_len;
6733b39a
JK
1808 }
1809 index++;
1810 }
1811
1812 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
7da50879
JK
1813 (beiscsi_conn->beiscsi_conn_cid -
1814 phba->fw_config.iscsi_cid_start),
1815 phdr, hdr_len, pfirst_buffer,
f2ba02b8 1816 offset);
6733b39a 1817
605c6cd2 1818 hwi_free_async_msg(phba, cri);
6733b39a
JK
1819 return 0;
1820}
1821
1822static unsigned int
1823hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1824 struct beiscsi_hba *phba,
1825 struct async_pdu_handle *pasync_handle)
1826{
1827 struct hwi_async_pdu_context *pasync_ctx;
1828 struct hwi_controller *phwi_ctrlr;
1829 unsigned int bytes_needed = 0, status = 0;
1830 unsigned short cri = pasync_handle->cri;
1831 struct pdu_base *ppdu;
1832
1833 phwi_ctrlr = phba->phwi_ctrlr;
1834 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1835
1836 list_del(&pasync_handle->link);
1837 if (pasync_handle->is_header) {
1838 pasync_ctx->async_header.busy_entries--;
1839 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1840 hwi_free_async_msg(phba, cri);
1841 BUG();
1842 }
1843
1844 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1845 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1846 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1847 (unsigned short)pasync_handle->buffer_len;
1848 list_add_tail(&pasync_handle->link,
1849 &pasync_ctx->async_entry[cri].wait_queue.list);
1850
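/*
 * Recover the data segment length advertised in the PDU basic header
 * by combining the data_len_hi and data_len_lo fields; this is the
 * number of payload bytes still expected for this CRI.
 */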
1851 ppdu = pasync_handle->pbuffer;
1852 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1853 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1854 0xFFFF0000) | ((be16_to_cpu((ppdu->
1855 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1856 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1857
1858 if (status == 0) {
1859 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1860 bytes_needed;
1861
1862 if (bytes_needed == 0)
1863 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1864 pasync_ctx, cri);
1865 }
1866 } else {
1867 pasync_ctx->async_data.busy_entries--;
1868 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1869 list_add_tail(&pasync_handle->link,
1870 &pasync_ctx->async_entry[cri].wait_queue.
1871 list);
1872 pasync_ctx->async_entry[cri].wait_queue.
1873 bytes_received +=
1874 (unsigned short)pasync_handle->buffer_len;
1875
1876 if (pasync_ctx->async_entry[cri].wait_queue.
1877 bytes_received >=
1878 pasync_ctx->async_entry[cri].wait_queue.
1879 bytes_needed)
1880 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1881 pasync_ctx, cri);
1882 }
1883 }
1884 return status;
1885}
1886
1887static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1888 struct beiscsi_hba *phba,
1889 struct i_t_dpdu_cqe *pdpdu_cqe)
1890{
1891 struct hwi_controller *phwi_ctrlr;
1892 struct hwi_async_pdu_context *pasync_ctx;
1893 struct async_pdu_handle *pasync_handle = NULL;
1894 unsigned int cq_index = -1;
1895
1896 phwi_ctrlr = phba->phwi_ctrlr;
1897 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1898 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1899 pdpdu_cqe, &cq_index);
1900
1901 if (pasync_handle->consumed == 0)
99bc5d55
JSJ
1902 hwi_update_async_writables(phba, pasync_ctx,
1903 pasync_handle->is_header, cq_index);
1904
6733b39a
JK
1905 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1906 hwi_post_async_buffers(phba, pasync_handle->is_header);
1907}
1908
756d29c8
JK
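/**
 * beiscsi_process_mcc_isr()- Drain the MCC completion queue
 * @phba: iscsi hba ptr
 *
 * Walks the valid MCC CQEs, dispatching async link-state events and
 * completing synchronous MCC commands, ringing the CQ doorbell every
 * 32 entries and once more when the queue has been drained.
 **/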
1909static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1910{
1911 struct be_queue_info *mcc_cq;
1912 struct be_mcc_compl *mcc_compl;
1913 unsigned int num_processed = 0;
1914
1915 mcc_cq = &phba->ctrl.mcc_obj.cq;
1916 mcc_compl = queue_tail_node(mcc_cq);
1917 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1918 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1919
1920 if (num_processed >= 32) {
1921 hwi_ring_cq_db(phba, mcc_cq->id,
1922 num_processed, 0, 0);
1923 num_processed = 0;
1924 }
1925 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1926 /* Interpret flags as an async trailer */
1927 if (is_link_state_evt(mcc_compl->flags))
 1928                         /* Interpret compl as an async link evt */
1929 beiscsi_async_link_state_process(phba,
1930 (struct be_async_event_link_state *) mcc_compl);
1931 else
99bc5d55
JSJ
1932 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
1933 "BM_%d : Unsupported Async Event, flags"
1934 " = 0x%08x\n",
1935 mcc_compl->flags);
756d29c8
JK
1936 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1937 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1938 atomic_dec(&phba->ctrl.mcc_obj.q.used);
1939 }
1940
1941 mcc_compl->flags = 0;
1942 queue_tail_inc(mcc_cq);
1943 mcc_compl = queue_tail_node(mcc_cq);
1944 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1945 num_processed++;
1946 }
1947
1948 if (num_processed > 0)
1949 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1950
1951}
bfead3b2 1952
6763daae
JSJ
1953/**
 1954  * beiscsi_process_cq() - Process the Completion Queue
 1955  * @pbe_eq: Event Q on which the Completion has come
 1956  *
 1957  * Return:
 1958  *     Number of Completion Entries processed.
 1959  **/
bfead3b2 1960static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
6733b39a 1961{
6733b39a
JK
1962 struct be_queue_info *cq;
1963 struct sol_cqe *sol;
1964 struct dmsg_cqe *dmsg;
1965 unsigned int num_processed = 0;
1966 unsigned int tot_nump = 0;
0a513dd8 1967 unsigned short code = 0, cid = 0;
6733b39a 1968 struct beiscsi_conn *beiscsi_conn;
c2462288
JK
1969 struct beiscsi_endpoint *beiscsi_ep;
1970 struct iscsi_endpoint *ep;
bfead3b2 1971 struct beiscsi_hba *phba;
6733b39a 1972
bfead3b2 1973 cq = pbe_eq->cq;
6733b39a 1974 sol = queue_tail_node(cq);
bfead3b2 1975 phba = pbe_eq->phba;
6733b39a
JK
1976
1977 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1978 CQE_VALID_MASK) {
1979 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1980
0a513dd8
JSJ
1981 cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
1982 CQE_CID_MASK) >> 6);
1983 code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
1984 CQE_CODE_MASK);
1985 ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
32951dd8 1986
c2462288
JK
1987 beiscsi_ep = ep->dd_data;
1988 beiscsi_conn = beiscsi_ep->conn;
756d29c8 1989
6733b39a 1990 if (num_processed >= 32) {
bfead3b2 1991 hwi_ring_cq_db(phba, cq->id,
6733b39a
JK
1992 num_processed, 0, 0);
1993 tot_nump += num_processed;
1994 num_processed = 0;
1995 }
1996
0a513dd8 1997 switch (code) {
6733b39a
JK
1998 case SOL_CMD_COMPLETE:
1999 hwi_complete_cmd(beiscsi_conn, phba, sol);
2000 break;
2001 case DRIVERMSG_NOTIFY:
99bc5d55
JSJ
2002 beiscsi_log(phba, KERN_INFO,
2003 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2004 "BM_%d : Received %s[%d] on CID : %d\n",
2005 cqe_desc[code], code, cid);
99bc5d55 2006
6733b39a
JK
2007 dmsg = (struct dmsg_cqe *)sol;
2008 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
2009 break;
2010 case UNSOL_HDR_NOTIFY:
99bc5d55
JSJ
2011 beiscsi_log(phba, KERN_INFO,
2012 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2013 "BM_%d : Received %s[%d] on CID : %d\n",
2014 cqe_desc[code], code, cid);
99bc5d55 2015
bfead3b2
JK
2016 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2017 (struct i_t_dpdu_cqe *)sol);
2018 break;
6733b39a 2019 case UNSOL_DATA_NOTIFY:
99bc5d55
JSJ
2020 beiscsi_log(phba, KERN_INFO,
2021 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2022 "BM_%d : Received %s[%d] on CID : %d\n",
2023 cqe_desc[code], code, cid);
99bc5d55 2024
6733b39a
JK
2025 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2026 (struct i_t_dpdu_cqe *)sol);
2027 break;
2028 case CXN_INVALIDATE_INDEX_NOTIFY:
2029 case CMD_INVALIDATED_NOTIFY:
2030 case CXN_INVALIDATE_NOTIFY:
99bc5d55
JSJ
2031 beiscsi_log(phba, KERN_ERR,
2032 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2033 "BM_%d : Ignoring %s[%d] on CID : %d\n",
2034 cqe_desc[code], code, cid);
6733b39a
JK
2035 break;
2036 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2037 case CMD_KILLED_INVALID_STATSN_RCVD:
2038 case CMD_KILLED_INVALID_R2T_RCVD:
2039 case CMD_CXN_KILLED_LUN_INVALID:
2040 case CMD_CXN_KILLED_ICD_INVALID:
2041 case CMD_CXN_KILLED_ITT_INVALID:
2042 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2043 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
99bc5d55
JSJ
2044 beiscsi_log(phba, KERN_ERR,
2045 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2046 "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2047 cqe_desc[code], code, cid);
6733b39a
JK
2048 break;
2049 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
99bc5d55
JSJ
2050 beiscsi_log(phba, KERN_ERR,
2051 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2052 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2053 cqe_desc[code], code, cid);
6733b39a
JK
2054 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2055 (struct i_t_dpdu_cqe *) sol);
2056 break;
2057 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2058 case CXN_KILLED_BURST_LEN_MISMATCH:
2059 case CXN_KILLED_AHS_RCVD:
2060 case CXN_KILLED_HDR_DIGEST_ERR:
2061 case CXN_KILLED_UNKNOWN_HDR:
2062 case CXN_KILLED_STALE_ITT_TTT_RCVD:
2063 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2064 case CXN_KILLED_TIMED_OUT:
2065 case CXN_KILLED_FIN_RCVD:
6763daae
JSJ
2066 case CXN_KILLED_RST_SENT:
2067 case CXN_KILLED_RST_RCVD:
6733b39a
JK
2068 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2069 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2070 case CXN_KILLED_OVER_RUN_RESIDUAL:
2071 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2072 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
99bc5d55
JSJ
2073 beiscsi_log(phba, KERN_ERR,
2074 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2075 "BM_%d : Event %s[%d] received on CID : %d\n",
2076 cqe_desc[code], code, cid);
0a513dd8
JSJ
2077 if (beiscsi_conn)
2078 iscsi_conn_failure(beiscsi_conn->conn,
2079 ISCSI_ERR_CONN_FAILED);
6733b39a
JK
2080 break;
2081 default:
99bc5d55
JSJ
2082 beiscsi_log(phba, KERN_ERR,
2083 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2084 "BM_%d : Invalid CQE Event Received Code : %d"
2085 "CID 0x%x...\n",
0a513dd8 2086 code, cid);
6733b39a
JK
2087 break;
2088 }
2089
2090 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2091 queue_tail_inc(cq);
2092 sol = queue_tail_node(cq);
2093 num_processed++;
2094 }
2095
2096 if (num_processed > 0) {
2097 tot_nump += num_processed;
bfead3b2 2098 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
6733b39a
JK
2099 }
2100 return tot_nump;
2101}
2102
756d29c8 2103void beiscsi_process_all_cqs(struct work_struct *work)
6733b39a
JK
2104{
2105 unsigned long flags;
bfead3b2
JK
2106 struct hwi_controller *phwi_ctrlr;
2107 struct hwi_context_memory *phwi_context;
72fb46a9
JSJ
2108 struct beiscsi_hba *phba;
2109 struct be_eq_obj *pbe_eq =
2110 container_of(work, struct be_eq_obj, work_cqs);
6733b39a 2111
72fb46a9 2112 phba = pbe_eq->phba;
bfead3b2
JK
2113 phwi_ctrlr = phba->phwi_ctrlr;
2114 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2 2115
72fb46a9 2116 if (pbe_eq->todo_mcc_cq) {
6733b39a 2117 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 2118 pbe_eq->todo_mcc_cq = false;
6733b39a 2119 spin_unlock_irqrestore(&phba->isr_lock, flags);
756d29c8 2120 beiscsi_process_mcc_isr(phba);
6733b39a
JK
2121 }
2122
72fb46a9 2123 if (pbe_eq->todo_cq) {
6733b39a 2124 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 2125 pbe_eq->todo_cq = false;
6733b39a 2126 spin_unlock_irqrestore(&phba->isr_lock, flags);
bfead3b2 2127 beiscsi_process_cq(pbe_eq);
6733b39a 2128 }
72fb46a9
JSJ
2129
2130 /* rearm EQ for further interrupts */
2131 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2132}
2133
2134static int be_iopoll(struct blk_iopoll *iop, int budget)
2135{
 2136         unsigned int ret;
2137 struct beiscsi_hba *phba;
bfead3b2 2138 struct be_eq_obj *pbe_eq;
6733b39a 2139
bfead3b2
JK
2140 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2141 ret = beiscsi_process_cq(pbe_eq);
6733b39a 2142 if (ret < budget) {
bfead3b2 2143 phba = pbe_eq->phba;
6733b39a 2144 blk_iopoll_complete(iop);
99bc5d55
JSJ
2145 beiscsi_log(phba, KERN_INFO,
2146 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2147 "BM_%d : rearm pbe_eq->q.id =%d\n",
2148 pbe_eq->q.id);
bfead3b2 2149 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2150 }
2151 return ret;
2152}
2153
2154static void
2155hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2156 unsigned int num_sg, struct beiscsi_io_task *io_task)
2157{
2158 struct iscsi_sge *psgl;
58ff4bd0 2159 unsigned int sg_len, index;
6733b39a
JK
2160 unsigned int sge_len = 0;
2161 unsigned long long addr;
2162 struct scatterlist *l_sg;
2163 unsigned int offset;
2164
2165 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2166 io_task->bhs_pa.u.a32.address_lo);
2167 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2168 io_task->bhs_pa.u.a32.address_hi);
2169
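/*
 * Only the first two SG elements are programmed directly into the WRB
 * as sge0/sge1; the complete scatter list is also written into the
 * task's ISCSI SGL fragment further below.
 */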
2170 l_sg = sg;
48bd86cf
JK
2171 for (index = 0; (index < num_sg) && (index < 2); index++,
2172 sg = sg_next(sg)) {
6733b39a
JK
2173 if (index == 0) {
2174 sg_len = sg_dma_len(sg);
2175 addr = (u64) sg_dma_address(sg);
2176 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
457ff3b7 2177 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2178 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
457ff3b7 2179 ((u32)(addr >> 32)));
6733b39a
JK
2180 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2181 sg_len);
2182 sge_len = sg_len;
6733b39a 2183 } else {
6733b39a
JK
2184 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2185 pwrb, sge_len);
2186 sg_len = sg_dma_len(sg);
2187 addr = (u64) sg_dma_address(sg);
2188 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
457ff3b7 2189 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2190 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
457ff3b7 2191 ((u32)(addr >> 32)));
6733b39a
JK
2192 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2193 sg_len);
2194 }
2195 }
2196 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2197 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2198
2199 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2200
2201 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2202 io_task->bhs_pa.u.a32.address_hi);
2203 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2204 io_task->bhs_pa.u.a32.address_lo);
2205
caf818f1
JK
2206 if (num_sg == 1) {
2207 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2208 1);
2209 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2210 0);
2211 } else if (num_sg == 2) {
2212 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2213 0);
2214 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2215 1);
2216 } else {
2217 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2218 0);
2219 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2220 0);
2221 }
6733b39a
JK
2222 sg = l_sg;
2223 psgl++;
2224 psgl++;
2225 offset = 0;
48bd86cf 2226 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
6733b39a
JK
2227 sg_len = sg_dma_len(sg);
2228 addr = (u64) sg_dma_address(sg);
2229 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2230 (addr & 0xFFFFFFFF));
2231 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2232 (addr >> 32));
2233 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2234 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2235 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2236 offset += sg_len;
2237 }
2238 psgl--;
2239 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2240}
2241
d629c471
JSJ
2242/**
2243 * hwi_write_buffer()- Populate the WRB with task info
2244 * @pwrb: ptr to the WRB entry
2245 * @task: iscsi task which is to be executed
2246 **/
6733b39a
JK
2247static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2248{
2249 struct iscsi_sge *psgl;
6733b39a
JK
2250 struct beiscsi_io_task *io_task = task->dd_data;
2251 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2252 struct beiscsi_hba *phba = beiscsi_conn->phba;
2253
2254 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2255 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2256 io_task->bhs_pa.u.a32.address_lo);
2257 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2258 io_task->bhs_pa.u.a32.address_hi);
2259
2260 if (task->data) {
2261 if (task->data_count) {
2262 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
d629c471
JSJ
2263 io_task->mtask_addr = pci_map_single(phba->pcidev,
2264 task->data,
2265 task->data_count,
2266 PCI_DMA_TODEVICE);
2267
2268 io_task->mtask_data_count = task->data_count;
6733b39a
JK
2269 } else {
2270 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
d629c471 2271 io_task->mtask_addr = 0;
6733b39a
JK
2272 }
2273 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
d629c471 2274 lower_32_bits(io_task->mtask_addr));
6733b39a 2275 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
d629c471 2276 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2277 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2278 task->data_count);
2279
2280 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2281 } else {
2282 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
d629c471 2283 io_task->mtask_addr = 0;
6733b39a
JK
2284 }
2285
2286 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2287
2288 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2289
2290 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2291 io_task->bhs_pa.u.a32.address_hi);
2292 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2293 io_task->bhs_pa.u.a32.address_lo);
2294 if (task->data) {
2295 psgl++;
2296 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2297 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2298 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2299 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2300 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2301 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2302
2303 psgl++;
2304 if (task->data) {
2305 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
d629c471 2306 lower_32_bits(io_task->mtask_addr));
6733b39a 2307 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
d629c471 2308 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2309 }
2310 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2311 }
2312 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2313}
2314
2315static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2316{
bfead3b2 2317 unsigned int num_cq_pages, num_async_pdu_buf_pages;
6733b39a
JK
2318 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2319 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2320
2321 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2322 sizeof(struct sol_cqe));
6733b39a
JK
2323 num_async_pdu_buf_pages =
2324 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2325 phba->params.defpdu_hdr_sz);
2326 num_async_pdu_buf_sgl_pages =
2327 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2328 sizeof(struct phys_addr));
2329 num_async_pdu_data_pages =
2330 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2331 phba->params.defpdu_data_sz);
2332 num_async_pdu_data_sgl_pages =
2333 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2334 sizeof(struct phys_addr));
2335
2336 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2337
2338 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2339 BE_ISCSI_PDU_HEADER_SIZE;
2340 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2341 sizeof(struct hwi_context_memory);
2342
6733b39a
JK
2343
2344 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2345 * (phba->params.wrbs_per_cxn)
2346 * phba->params.cxns_per_ctrl;
2347 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2348 (phba->params.wrbs_per_cxn);
2349 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2350 phba->params.cxns_per_ctrl);
2351
2352 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2353 phba->params.icds_per_ctrl;
2354 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2355 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2356
2357 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2358 num_async_pdu_buf_pages * PAGE_SIZE;
2359 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2360 num_async_pdu_data_pages * PAGE_SIZE;
2361 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2362 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2363 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2364 num_async_pdu_data_sgl_pages * PAGE_SIZE;
2365 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2366 phba->params.asyncpdus_per_ctrl *
2367 sizeof(struct async_pdu_handle);
2368 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2369 phba->params.asyncpdus_per_ctrl *
2370 sizeof(struct async_pdu_handle);
2371 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2372 sizeof(struct hwi_async_pdu_context) +
2373 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2374}
2375
2376static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2377{
2378 struct be_mem_descriptor *mem_descr;
2379 dma_addr_t bus_add;
2380 struct mem_array *mem_arr, *mem_arr_orig;
2381 unsigned int i, j, alloc_size, curr_alloc_size;
2382
3ec78271 2383 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
6733b39a
JK
2384 if (!phba->phwi_ctrlr)
2385 return -ENOMEM;
2386
2387 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2388 GFP_KERNEL);
2389 if (!phba->init_mem) {
2390 kfree(phba->phwi_ctrlr);
2391 return -ENOMEM;
2392 }
2393
2394 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2395 GFP_KERNEL);
2396 if (!mem_arr_orig) {
2397 kfree(phba->init_mem);
2398 kfree(phba->phwi_ctrlr);
2399 return -ENOMEM;
2400 }
2401
2402 mem_descr = phba->init_mem;
2403 for (i = 0; i < SE_MEM_MAX; i++) {
2404 j = 0;
2405 mem_arr = mem_arr_orig;
2406 alloc_size = phba->mem_req[i];
2407 memset(mem_arr, 0, sizeof(struct mem_array) *
2408 BEISCSI_MAX_FRAGS_INIT);
2409 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
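/*
 * Try to allocate the region as one DMA-coherent chunk; if that
 * fails, shrink the chunk (round down to a power of two, then keep
 * halving) and build the region from several fragments, giving up
 * once the chunk size is already down to BE_MIN_MEM_SIZE.
 */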
2410 do {
2411 mem_arr->virtual_address = pci_alloc_consistent(
2412 phba->pcidev,
2413 curr_alloc_size,
2414 &bus_add);
2415 if (!mem_arr->virtual_address) {
2416 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2417 goto free_mem;
2418 if (curr_alloc_size -
2419 rounddown_pow_of_two(curr_alloc_size))
2420 curr_alloc_size = rounddown_pow_of_two
2421 (curr_alloc_size);
2422 else
2423 curr_alloc_size = curr_alloc_size / 2;
2424 } else {
2425 mem_arr->bus_address.u.
2426 a64.address = (__u64) bus_add;
2427 mem_arr->size = curr_alloc_size;
2428 alloc_size -= curr_alloc_size;
2429 curr_alloc_size = min(be_max_phys_size *
2430 1024, alloc_size);
2431 j++;
2432 mem_arr++;
2433 }
2434 } while (alloc_size);
2435 mem_descr->num_elements = j;
2436 mem_descr->size_in_bytes = phba->mem_req[i];
2437 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2438 GFP_KERNEL);
2439 if (!mem_descr->mem_array)
2440 goto free_mem;
2441
2442 memcpy(mem_descr->mem_array, mem_arr_orig,
2443 sizeof(struct mem_array) * j);
2444 mem_descr++;
2445 }
2446 kfree(mem_arr_orig);
2447 return 0;
2448free_mem:
2449 mem_descr->num_elements = j;
2450 while ((i) || (j)) {
2451 for (j = mem_descr->num_elements; j > 0; j--) {
2452 pci_free_consistent(phba->pcidev,
2453 mem_descr->mem_array[j - 1].size,
2454 mem_descr->mem_array[j - 1].
2455 virtual_address,
457ff3b7
JK
2456 (unsigned long)mem_descr->
2457 mem_array[j - 1].
6733b39a
JK
2458 bus_address.u.a64.address);
2459 }
2460 if (i) {
2461 i--;
2462 kfree(mem_descr->mem_array);
2463 mem_descr--;
2464 }
2465 }
2466 kfree(mem_arr_orig);
2467 kfree(phba->init_mem);
2468 kfree(phba->phwi_ctrlr);
2469 return -ENOMEM;
2470}
2471
2472static int beiscsi_get_memory(struct beiscsi_hba *phba)
2473{
2474 beiscsi_find_mem_req(phba);
2475 return beiscsi_alloc_mem(phba);
2476}
2477
2478static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2479{
2480 struct pdu_data_out *pdata_out;
2481 struct pdu_nop_out *pnop_out;
2482 struct be_mem_descriptor *mem_descr;
2483
2484 mem_descr = phba->init_mem;
2485 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2486 pdata_out =
2487 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2488 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2489
2490 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2491 IIOC_SCSI_DATA);
2492
2493 pnop_out =
2494 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2495 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2496
2497 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2498 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2499 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2500 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2501}
2502
3ec78271 2503static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
6733b39a
JK
2504{
2505 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
3ec78271 2506 struct wrb_handle *pwrb_handle = NULL;
6733b39a
JK
2507 struct hwi_controller *phwi_ctrlr;
2508 struct hwi_wrb_context *pwrb_context;
3ec78271
JK
2509 struct iscsi_wrb *pwrb = NULL;
2510 unsigned int num_cxn_wrbh = 0;
2511 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
6733b39a
JK
2512
2513 mem_descr_wrbh = phba->init_mem;
2514 mem_descr_wrbh += HWI_MEM_WRBH;
2515
2516 mem_descr_wrb = phba->init_mem;
2517 mem_descr_wrb += HWI_MEM_WRB;
6733b39a
JK
2518 phwi_ctrlr = phba->phwi_ctrlr;
2519
2520 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2521 pwrb_context = &phwi_ctrlr->wrb_context[index];
6733b39a
JK
2522 pwrb_context->pwrb_handle_base =
2523 kzalloc(sizeof(struct wrb_handle *) *
2524 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2525 if (!pwrb_context->pwrb_handle_base) {
99bc5d55
JSJ
2526 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2527 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2528 goto init_wrb_hndl_failed;
2529 }
6733b39a
JK
2530 pwrb_context->pwrb_handle_basestd =
2531 kzalloc(sizeof(struct wrb_handle *) *
2532 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2533 if (!pwrb_context->pwrb_handle_basestd) {
99bc5d55
JSJ
2534 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2535 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2536 goto init_wrb_hndl_failed;
2537 }
2538 if (!num_cxn_wrbh) {
2539 pwrb_handle =
2540 mem_descr_wrbh->mem_array[idx].virtual_address;
2541 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2542 ((sizeof(struct wrb_handle)) *
2543 phba->params.wrbs_per_cxn));
2544 idx++;
2545 }
2546 pwrb_context->alloc_index = 0;
2547 pwrb_context->wrb_handles_available = 0;
2548 pwrb_context->free_index = 0;
2549
6733b39a 2550 if (num_cxn_wrbh) {
6733b39a
JK
2551 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2552 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2553 pwrb_context->pwrb_handle_basestd[j] =
2554 pwrb_handle;
2555 pwrb_context->wrb_handles_available++;
bfead3b2 2556 pwrb_handle->wrb_index = j;
6733b39a
JK
2557 pwrb_handle++;
2558 }
6733b39a
JK
2559 num_cxn_wrbh--;
2560 }
2561 }
2562 idx = 0;
ed58ea2a 2563 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
6733b39a 2564 pwrb_context = &phwi_ctrlr->wrb_context[index];
3ec78271 2565 if (!num_cxn_wrb) {
6733b39a 2566 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
7c56533c 2567 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
3ec78271
JK
2568 ((sizeof(struct iscsi_wrb) *
2569 phba->params.wrbs_per_cxn));
2570 idx++;
2571 }
2572
2573 if (num_cxn_wrb) {
6733b39a
JK
2574 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2575 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2576 pwrb_handle->pwrb = pwrb;
2577 pwrb++;
2578 }
2579 num_cxn_wrb--;
2580 }
2581 }
3ec78271
JK
2582 return 0;
2583init_wrb_hndl_failed:
2584 for (j = index; j > 0; j--) {
2585 pwrb_context = &phwi_ctrlr->wrb_context[j];
2586 kfree(pwrb_context->pwrb_handle_base);
2587 kfree(pwrb_context->pwrb_handle_basestd);
2588 }
2589 return -ENOMEM;
6733b39a
JK
2590}
2591
2592static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2593{
2594 struct hwi_controller *phwi_ctrlr;
2595 struct hba_parameters *p = &phba->params;
2596 struct hwi_async_pdu_context *pasync_ctx;
2597 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
dc63aac6 2598 unsigned int index, idx, num_per_mem, num_async_data;
6733b39a
JK
2599 struct be_mem_descriptor *mem_descr;
2600
2601 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2602 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2603
2604 phwi_ctrlr = phba->phwi_ctrlr;
2605 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2606 mem_descr->mem_array[0].virtual_address;
2607 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2608 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2609
dc63aac6
JK
2610 pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
2611 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
6733b39a
JK
2612
2613 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2614 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2615 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2616 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2617 "BM_%d : hwi_init_async_pdu_ctx"
2618 " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
2619 mem_descr->mem_array[0].virtual_address);
6733b39a 2620 } else
99bc5d55
JSJ
2621 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2622 "BM_%d : No Virtual address\n");
6733b39a
JK
2623
2624 pasync_ctx->async_header.va_base =
2625 mem_descr->mem_array[0].virtual_address;
2626
2627 pasync_ctx->async_header.pa_base.u.a64.address =
2628 mem_descr->mem_array[0].bus_address.u.a64.address;
2629
2630 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2631 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2632 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2633 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2634 "BM_%d : hwi_init_async_pdu_ctx"
2635 " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
2636 mem_descr->mem_array[0].virtual_address);
6733b39a 2637 } else
99bc5d55
JSJ
2638 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2639 "BM_%d : No Virtual address\n");
2640
6733b39a
JK
2641 pasync_ctx->async_header.ring_base =
2642 mem_descr->mem_array[0].virtual_address;
2643
2644 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2645 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2646 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2647 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2648 "BM_%d : hwi_init_async_pdu_ctx"
2649 " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
2650 mem_descr->mem_array[0].virtual_address);
6733b39a 2651 } else
99bc5d55
JSJ
2652 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2653 "BM_%d : No Virtual address\n");
6733b39a
JK
2654
2655 pasync_ctx->async_header.handle_base =
2656 mem_descr->mem_array[0].virtual_address;
2657 pasync_ctx->async_header.writables = 0;
2658 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2659
6733b39a
JK
2660
2661 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2662 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2663 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2664 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2665 "BM_%d : hwi_init_async_pdu_ctx"
2666 " HWI_MEM_ASYNC_DATA_RING va=%p\n",
2667 mem_descr->mem_array[0].virtual_address);
6733b39a 2668 } else
99bc5d55
JSJ
2669 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2670 "BM_%d : No Virtual address\n");
6733b39a
JK
2671
2672 pasync_ctx->async_data.ring_base =
2673 mem_descr->mem_array[0].virtual_address;
2674
2675 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2676 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2677 if (!mem_descr->mem_array[0].virtual_address)
99bc5d55
JSJ
2678 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2679 "BM_%d : No Virtual address\n");
6733b39a
JK
2680
2681 pasync_ctx->async_data.handle_base =
2682 mem_descr->mem_array[0].virtual_address;
2683 pasync_ctx->async_data.writables = 0;
2684 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2685
2686 pasync_header_h =
2687 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2688 pasync_data_h =
2689 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2690
dc63aac6
JK
2691 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2692 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2693 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2694 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2695 "BM_%d : hwi_init_async_pdu_ctx"
2696 " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
2697 mem_descr->mem_array[0].virtual_address);
dc63aac6 2698 } else
99bc5d55
JSJ
2699 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2700 "BM_%d : No Virtual address\n");
2701
dc63aac6
JK
2702 idx = 0;
2703 pasync_ctx->async_data.va_base =
2704 mem_descr->mem_array[idx].virtual_address;
2705 pasync_ctx->async_data.pa_base.u.a64.address =
2706 mem_descr->mem_array[idx].bus_address.u.a64.address;
2707
2708 num_async_data = ((mem_descr->mem_array[idx].size) /
2709 phba->params.defpdu_data_sz);
2710 num_per_mem = 0;
2711
6733b39a
JK
2712 for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2713 pasync_header_h->cri = -1;
2714 pasync_header_h->index = (char)index;
2715 INIT_LIST_HEAD(&pasync_header_h->link);
2716 pasync_header_h->pbuffer =
2717 (void *)((unsigned long)
2718 (pasync_ctx->async_header.va_base) +
2719 (p->defpdu_hdr_sz * index));
2720
2721 pasync_header_h->pa.u.a64.address =
2722 pasync_ctx->async_header.pa_base.u.a64.address +
2723 (p->defpdu_hdr_sz * index);
2724
2725 list_add_tail(&pasync_header_h->link,
2726 &pasync_ctx->async_header.free_list);
2727 pasync_header_h++;
2728 pasync_ctx->async_header.free_entries++;
2729 pasync_ctx->async_header.writables++;
2730
2731 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2732 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2733 header_busy_list);
2734 pasync_data_h->cri = -1;
2735 pasync_data_h->index = (char)index;
2736 INIT_LIST_HEAD(&pasync_data_h->link);
dc63aac6
JK
2737
2738 if (!num_async_data) {
2739 num_per_mem = 0;
2740 idx++;
2741 pasync_ctx->async_data.va_base =
2742 mem_descr->mem_array[idx].virtual_address;
2743 pasync_ctx->async_data.pa_base.u.a64.address =
2744 mem_descr->mem_array[idx].
2745 bus_address.u.a64.address;
2746
2747 num_async_data = ((mem_descr->mem_array[idx].size) /
2748 phba->params.defpdu_data_sz);
2749 }
6733b39a
JK
2750 pasync_data_h->pbuffer =
2751 (void *)((unsigned long)
2752 (pasync_ctx->async_data.va_base) +
dc63aac6 2753 (p->defpdu_data_sz * num_per_mem));
6733b39a
JK
2754
2755 pasync_data_h->pa.u.a64.address =
2756 pasync_ctx->async_data.pa_base.u.a64.address +
dc63aac6
JK
2757 (p->defpdu_data_sz * num_per_mem);
2758 num_per_mem++;
2759 num_async_data--;
6733b39a
JK
2760
2761 list_add_tail(&pasync_data_h->link,
2762 &pasync_ctx->async_data.free_list);
2763 pasync_data_h++;
2764 pasync_ctx->async_data.free_entries++;
2765 pasync_ctx->async_data.writables++;
2766
2767 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2768 }
2769
2770 pasync_ctx->async_header.host_write_ptr = 0;
2771 pasync_ctx->async_header.ep_read_ptr = -1;
2772 pasync_ctx->async_data.host_write_ptr = 0;
2773 pasync_ctx->async_data.ep_read_ptr = -1;
2774}
2775
2776static int
2777be_sgl_create_contiguous(void *virtual_address,
2778 u64 physical_address, u32 length,
2779 struct be_dma_mem *sgl)
2780{
2781 WARN_ON(!virtual_address);
2782 WARN_ON(!physical_address);
 2783         WARN_ON(length == 0);
2784 WARN_ON(!sgl);
2785
2786 sgl->va = virtual_address;
457ff3b7 2787 sgl->dma = (unsigned long)physical_address;
6733b39a
JK
2788 sgl->size = length;
2789
2790 return 0;
2791}
2792
2793static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2794{
2795 memset(sgl, 0, sizeof(*sgl));
2796}
2797
2798static void
2799hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2800 struct mem_array *pmem, struct be_dma_mem *sgl)
2801{
2802 if (sgl->va)
2803 be_sgl_destroy_contiguous(sgl);
2804
2805 be_sgl_create_contiguous(pmem->virtual_address,
2806 pmem->bus_address.u.a64.address,
2807 pmem->size, sgl);
2808}
2809
2810static void
2811hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2812 struct mem_array *pmem, struct be_dma_mem *sgl)
2813{
2814 if (sgl->va)
2815 be_sgl_destroy_contiguous(sgl);
2816
2817 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2818 pmem->bus_address.u.a64.address,
2819 pmem->size, sgl);
2820}
2821
2822static int be_fill_queue(struct be_queue_info *q,
2823 u16 len, u16 entry_size, void *vaddress)
2824{
2825 struct be_dma_mem *mem = &q->dma_mem;
2826
2827 memset(q, 0, sizeof(*q));
2828 q->len = len;
2829 q->entry_size = entry_size;
2830 mem->size = len * entry_size;
2831 mem->va = vaddress;
2832 if (!mem->va)
2833 return -ENOMEM;
2834 memset(mem->va, 0, mem->size);
2835 return 0;
2836}
2837
bfead3b2 2838static int beiscsi_create_eqs(struct beiscsi_hba *phba,
6733b39a
JK
2839 struct hwi_context_memory *phwi_context)
2840{
bfead3b2 2841 unsigned int i, num_eq_pages;
99bc5d55 2842 int ret = 0, eq_for_mcc;
6733b39a
JK
2843 struct be_queue_info *eq;
2844 struct be_dma_mem *mem;
6733b39a 2845 void *eq_vaddress;
bfead3b2 2846 dma_addr_t paddr;
6733b39a 2847
bfead3b2
JK
2848 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2849 sizeof(struct be_eq_entry));
6733b39a 2850
bfead3b2
JK
2851 if (phba->msix_enabled)
2852 eq_for_mcc = 1;
2853 else
2854 eq_for_mcc = 0;
2855 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2856 eq = &phwi_context->be_eq[i].q;
2857 mem = &eq->dma_mem;
2858 phwi_context->be_eq[i].phba = phba;
2859 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2860 num_eq_pages * PAGE_SIZE,
2861 &paddr);
2862 if (!eq_vaddress)
2863 goto create_eq_error;
2864
2865 mem->va = eq_vaddress;
2866 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2867 sizeof(struct be_eq_entry), eq_vaddress);
2868 if (ret) {
99bc5d55
JSJ
2869 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2870 "BM_%d : be_fill_queue Failed for EQ\n");
bfead3b2
JK
2871 goto create_eq_error;
2872 }
6733b39a 2873
bfead3b2
JK
2874 mem->dma = paddr;
2875 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2876 phwi_context->cur_eqd);
2877 if (ret) {
99bc5d55
JSJ
2878 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2879 "BM_%d : beiscsi_cmd_eq_create"
2880 "Failed for EQ\n");
bfead3b2
JK
2881 goto create_eq_error;
2882 }
99bc5d55
JSJ
2883
2884 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2885 "BM_%d : eqid = %d\n",
2886 phwi_context->be_eq[i].q.id);
6733b39a 2887 }
6733b39a 2888 return 0;
bfead3b2 2889create_eq_error:
107dfcba 2890 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
bfead3b2
JK
2891 eq = &phwi_context->be_eq[i].q;
2892 mem = &eq->dma_mem;
2893 if (mem->va)
2894 pci_free_consistent(phba->pcidev, num_eq_pages
2895 * PAGE_SIZE,
2896 mem->va, mem->dma);
2897 }
2898 return ret;
6733b39a
JK
2899}
2900
bfead3b2 2901static int beiscsi_create_cqs(struct beiscsi_hba *phba,
6733b39a
JK
2902 struct hwi_context_memory *phwi_context)
2903{
bfead3b2 2904 unsigned int i, num_cq_pages;
99bc5d55 2905 int ret = 0;
6733b39a
JK
2906 struct be_queue_info *cq, *eq;
2907 struct be_dma_mem *mem;
bfead3b2 2908 struct be_eq_obj *pbe_eq;
6733b39a 2909 void *cq_vaddress;
bfead3b2 2910 dma_addr_t paddr;
6733b39a 2911
bfead3b2
JK
2912 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2913 sizeof(struct sol_cqe));
6733b39a 2914
bfead3b2
JK
2915 for (i = 0; i < phba->num_cpus; i++) {
2916 cq = &phwi_context->be_cq[i];
2917 eq = &phwi_context->be_eq[i].q;
2918 pbe_eq = &phwi_context->be_eq[i];
2919 pbe_eq->cq = cq;
2920 pbe_eq->phba = phba;
2921 mem = &cq->dma_mem;
2922 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2923 num_cq_pages * PAGE_SIZE,
2924 &paddr);
2925 if (!cq_vaddress)
2926 goto create_cq_error;
7da50879 2927 ret = be_fill_queue(cq, phba->params.num_cq_entries,
bfead3b2
JK
2928 sizeof(struct sol_cqe), cq_vaddress);
2929 if (ret) {
99bc5d55
JSJ
2930 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2931 "BM_%d : be_fill_queue Failed "
2932 "for ISCSI CQ\n");
bfead3b2
JK
2933 goto create_cq_error;
2934 }
2935
2936 mem->dma = paddr;
2937 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2938 false, 0);
2939 if (ret) {
99bc5d55
JSJ
2940 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2941 "BM_%d : beiscsi_cmd_eq_create"
2942 "Failed for ISCSI CQ\n");
bfead3b2
JK
2943 goto create_cq_error;
2944 }
99bc5d55
JSJ
2945 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2946 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
2947 "iSCSI CQ CREATED\n", cq->id, eq->id);
6733b39a 2948 }
6733b39a 2949 return 0;
bfead3b2
JK
2950
2951create_cq_error:
2952 for (i = 0; i < phba->num_cpus; i++) {
2953 cq = &phwi_context->be_cq[i];
2954 mem = &cq->dma_mem;
2955 if (mem->va)
2956 pci_free_consistent(phba->pcidev, num_cq_pages
2957 * PAGE_SIZE,
2958 mem->va, mem->dma);
2959 }
2960 return ret;
2961
6733b39a
JK
2962}
2963
2964static int
2965beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2966 struct hwi_context_memory *phwi_context,
2967 struct hwi_controller *phwi_ctrlr,
2968 unsigned int def_pdu_ring_sz)
2969{
2970 unsigned int idx;
2971 int ret;
2972 struct be_queue_info *dq, *cq;
2973 struct be_dma_mem *mem;
2974 struct be_mem_descriptor *mem_descr;
2975 void *dq_vaddress;
2976
2977 idx = 0;
2978 dq = &phwi_context->be_def_hdrq;
bfead3b2 2979 cq = &phwi_context->be_cq[0];
6733b39a
JK
2980 mem = &dq->dma_mem;
2981 mem_descr = phba->init_mem;
2982 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2983 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2984 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2985 sizeof(struct phys_addr),
2986 sizeof(struct phys_addr), dq_vaddress);
2987 if (ret) {
99bc5d55
JSJ
2988 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2989 "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
6733b39a
JK
2990 return ret;
2991 }
457ff3b7
JK
2992 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2993 bus_address.u.a64.address;
6733b39a
JK
2994 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2995 def_pdu_ring_sz,
2996 phba->params.defpdu_hdr_sz);
2997 if (ret) {
99bc5d55
JSJ
2998 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2999 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
6733b39a
JK
3000 return ret;
3001 }
3002 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
99bc5d55
JSJ
3003 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3004 "BM_%d : iscsi def pdu id is %d\n",
3005 phwi_context->be_def_hdrq.id);
3006
6733b39a
JK
3007 hwi_post_async_buffers(phba, 1);
3008 return 0;
3009}
3010
3011static int
3012beiscsi_create_def_data(struct beiscsi_hba *phba,
3013 struct hwi_context_memory *phwi_context,
3014 struct hwi_controller *phwi_ctrlr,
3015 unsigned int def_pdu_ring_sz)
3016{
3017 unsigned int idx;
3018 int ret;
3019 struct be_queue_info *dataq, *cq;
3020 struct be_dma_mem *mem;
3021 struct be_mem_descriptor *mem_descr;
3022 void *dq_vaddress;
3023
3024 idx = 0;
3025 dataq = &phwi_context->be_def_dataq;
bfead3b2 3026 cq = &phwi_context->be_cq[0];
6733b39a
JK
3027 mem = &dataq->dma_mem;
3028 mem_descr = phba->init_mem;
3029 mem_descr += HWI_MEM_ASYNC_DATA_RING;
3030 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3031 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3032 sizeof(struct phys_addr),
3033 sizeof(struct phys_addr), dq_vaddress);
3034 if (ret) {
99bc5d55
JSJ
3035 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3036 "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
6733b39a
JK
3037 return ret;
3038 }
457ff3b7
JK
3039 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3040 bus_address.u.a64.address;
6733b39a
JK
3041 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3042 def_pdu_ring_sz,
3043 phba->params.defpdu_data_sz);
3044 if (ret) {
99bc5d55
JSJ
3045 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3046 "BM_%d be_cmd_create_default_pdu_queue"
3047 " Failed for DEF PDU DATA\n");
6733b39a
JK
3048 return ret;
3049 }
3050 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
99bc5d55
JSJ
3051 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3052 "BM_%d : iscsi def data id is %d\n",
3053 phwi_context->be_def_dataq.id);
3054
6733b39a 3055 hwi_post_async_buffers(phba, 0);
99bc5d55
JSJ
3056 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3057 "BM_%d : DEFAULT PDU DATA RING CREATED\n");
3058
6733b39a
JK
3059 return 0;
3060}
3061
3062static int
3063beiscsi_post_pages(struct beiscsi_hba *phba)
3064{
3065 struct be_mem_descriptor *mem_descr;
3066 struct mem_array *pm_arr;
3067 unsigned int page_offset, i;
3068 struct be_dma_mem sgl;
3069 int status;
3070
3071 mem_descr = phba->init_mem;
3072 mem_descr += HWI_MEM_SGE;
3073 pm_arr = mem_descr->mem_array;
3074
3075 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3076 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
3077 for (i = 0; i < mem_descr->num_elements; i++) {
3078 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3079 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3080 page_offset,
3081 (pm_arr->size / PAGE_SIZE));
3082 page_offset += pm_arr->size / PAGE_SIZE;
3083 if (status != 0) {
99bc5d55
JSJ
3084 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3085 "BM_%d : post sgl failed.\n");
6733b39a
JK
3086 return status;
3087 }
3088 pm_arr++;
3089 }
99bc5d55
JSJ
3090 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3091 "BM_%d : POSTED PAGES\n");
6733b39a
JK
3092 return 0;
3093}
3094
bfead3b2
JK
3095static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3096{
3097 struct be_dma_mem *mem = &q->dma_mem;
c8b25598 3098 if (mem->va) {
bfead3b2
JK
3099 pci_free_consistent(phba->pcidev, mem->size,
3100 mem->va, mem->dma);
c8b25598
JK
3101 mem->va = NULL;
3102 }
bfead3b2
JK
3103}
3104
3105static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3106 u16 len, u16 entry_size)
3107{
3108 struct be_dma_mem *mem = &q->dma_mem;
3109
3110 memset(q, 0, sizeof(*q));
3111 q->len = len;
3112 q->entry_size = entry_size;
3113 mem->size = len * entry_size;
3114 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
3115 if (!mem->va)
d3ad2bb3 3116 return -ENOMEM;
bfead3b2
JK
3117 memset(mem->va, 0, mem->size);
3118 return 0;
3119}
3120
6733b39a
JK
3121static int
3122beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3123 struct hwi_context_memory *phwi_context,
3124 struct hwi_controller *phwi_ctrlr)
3125{
3126 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3127 u64 pa_addr_lo;
3128 unsigned int idx, num, i;
3129 struct mem_array *pwrb_arr;
3130 void *wrb_vaddr;
3131 struct be_dma_mem sgl;
3132 struct be_mem_descriptor *mem_descr;
3133 int status;
3134
3135 idx = 0;
3136 mem_descr = phba->init_mem;
3137 mem_descr += HWI_MEM_WRB;
3138 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3139 GFP_KERNEL);
3140 if (!pwrb_arr) {
99bc5d55
JSJ
3141 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3142 "BM_%d : Memory alloc failed in create wrb ring.\n");
6733b39a
JK
3143 return -ENOMEM;
3144 }
3145 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3146 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3147 num_wrb_rings = mem_descr->mem_array[idx].size /
3148 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3149
3150 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3151 if (num_wrb_rings) {
3152 pwrb_arr[num].virtual_address = wrb_vaddr;
3153 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3154 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3155 sizeof(struct iscsi_wrb);
3156 wrb_vaddr += pwrb_arr[num].size;
3157 pa_addr_lo += pwrb_arr[num].size;
3158 num_wrb_rings--;
3159 } else {
3160 idx++;
3161 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3162 pa_addr_lo = mem_descr->mem_array[idx].\
3163 bus_address.u.a64.address;
3164 num_wrb_rings = mem_descr->mem_array[idx].size /
3165 (phba->params.wrbs_per_cxn *
3166 sizeof(struct iscsi_wrb));
3167 pwrb_arr[num].virtual_address = wrb_vaddr;
3168 pwrb_arr[num].bus_address.u.a64.address\
3169 = pa_addr_lo;
3170 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3171 sizeof(struct iscsi_wrb);
3172 wrb_vaddr += pwrb_arr[num].size;
3173 pa_addr_lo += pwrb_arr[num].size;
3174 num_wrb_rings--;
3175 }
3176 }
3177 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3178 wrb_mem_index = 0;
3179 offset = 0;
3180 size = 0;
3181
3182 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3183 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3184 &phwi_context->be_wrbq[i]);
3185 if (status != 0) {
99bc5d55
JSJ
3186 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3187 "BM_%d : wrbq create failed.");
1462b8ff 3188 kfree(pwrb_arr);
6733b39a
JK
3189 return status;
3190 }
7da50879
JK
3191 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
3192 id;
6733b39a
JK
3193 }
3194 kfree(pwrb_arr);
3195 return 0;
3196}
3197
3198static void free_wrb_handles(struct beiscsi_hba *phba)
3199{
3200 unsigned int index;
3201 struct hwi_controller *phwi_ctrlr;
3202 struct hwi_wrb_context *pwrb_context;
3203
3204 phwi_ctrlr = phba->phwi_ctrlr;
3205 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3206 pwrb_context = &phwi_ctrlr->wrb_context[index];
3207 kfree(pwrb_context->pwrb_handle_base);
3208 kfree(pwrb_context->pwrb_handle_basestd);
3209 }
3210}
3211
bfead3b2
JK
3212static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3213{
3214 struct be_queue_info *q;
3215 struct be_ctrl_info *ctrl = &phba->ctrl;
3216
3217 q = &phba->ctrl.mcc_obj.q;
3218 if (q->created)
3219 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3220 be_queue_free(phba, q);
3221
3222 q = &phba->ctrl.mcc_obj.cq;
3223 if (q->created)
3224 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3225 be_queue_free(phba, q);
3226}
3227
6733b39a
JK
3228static void hwi_cleanup(struct beiscsi_hba *phba)
3229{
3230 struct be_queue_info *q;
3231 struct be_ctrl_info *ctrl = &phba->ctrl;
3232 struct hwi_controller *phwi_ctrlr;
3233 struct hwi_context_memory *phwi_context;
bfead3b2 3234 int i, eq_num;
6733b39a
JK
3235
3236 phwi_ctrlr = phba->phwi_ctrlr;
3237 phwi_context = phwi_ctrlr->phwi_ctxt;
3238 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3239 q = &phwi_context->be_wrbq[i];
3240 if (q->created)
3241 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3242 }
6733b39a
JK
3243 free_wrb_handles(phba);
3244
3245 q = &phwi_context->be_def_hdrq;
3246 if (q->created)
3247 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3248
3249 q = &phwi_context->be_def_dataq;
3250 if (q->created)
3251 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3252
3253 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3254
bfead3b2
JK
3255 for (i = 0; i < (phba->num_cpus); i++) {
3256 q = &phwi_context->be_cq[i];
3257 if (q->created)
3258 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3259 }
3260 if (phba->msix_enabled)
3261 eq_num = 1;
3262 else
3263 eq_num = 0;
3264 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3265 q = &phwi_context->be_eq[i].q;
3266 if (q->created)
3267 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3268 }
3269 be_mcc_queues_destroy(phba);
3270}
6733b39a 3271
bfead3b2
JK
3272static int be_mcc_queues_create(struct beiscsi_hba *phba,
3273 struct hwi_context_memory *phwi_context)
3274{
3275 struct be_queue_info *q, *cq;
3276 struct be_ctrl_info *ctrl = &phba->ctrl;
3277
3278 /* Alloc MCC compl queue */
3279 cq = &phba->ctrl.mcc_obj.cq;
3280 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3281 sizeof(struct be_mcc_compl)))
3282 goto err;
3283 /* Ask BE to create MCC compl queue; */
3284 if (phba->msix_enabled) {
3285 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3286 [phba->num_cpus].q, false, true, 0))
3287 goto mcc_cq_free;
3288 } else {
3289 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3290 false, true, 0))
3291 goto mcc_cq_free;
3292 }
3293
3294 /* Alloc MCC queue */
3295 q = &phba->ctrl.mcc_obj.q;
3296 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3297 goto mcc_cq_destroy;
3298
3299 /* Ask BE to create MCC queue */
35e66019 3300 if (beiscsi_cmd_mccq_create(phba, q, cq))
bfead3b2
JK
3301 goto mcc_q_free;
3302
3303 return 0;
3304
3305mcc_q_free:
3306 be_queue_free(phba, q);
3307mcc_cq_destroy:
3308 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3309mcc_cq_free:
3310 be_queue_free(phba, cq);
3311err:
d3ad2bb3 3312 return -ENOMEM;
bfead3b2
JK
3313}
3314
107dfcba
JSJ
3315/**
3316 * find_num_cpus()- Get the CPU online count
3317 * @phba: ptr to priv structure
3318 *
3319 * CPU count is used for creating EQ.
3320 **/
3321static void find_num_cpus(struct beiscsi_hba *phba)
bfead3b2
JK
3322{
3323 int num_cpus = 0;
3324
3325 num_cpus = num_online_cpus();
bfead3b2 3326
107dfcba
JSJ
3327 phba->num_cpus = (num_cpus >= BEISCSI_MAX_NUM_CPU) ?
3328 (BEISCSI_MAX_NUM_CPU - 1) : num_cpus;
6733b39a
JK
3329}
3330
3331static int hwi_init_port(struct beiscsi_hba *phba)
3332{
3333 struct hwi_controller *phwi_ctrlr;
3334 struct hwi_context_memory *phwi_context;
3335 unsigned int def_pdu_ring_sz;
3336 struct be_ctrl_info *ctrl = &phba->ctrl;
3337 int status;
3338
3339 def_pdu_ring_sz =
3340 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3341 phwi_ctrlr = phba->phwi_ctrlr;
6733b39a 3342 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3343 phwi_context->max_eqd = 0;
3344 phwi_context->min_eqd = 0;
3345 phwi_context->cur_eqd = 64;
6733b39a 3346 be_cmd_fw_initialize(&phba->ctrl);
bfead3b2
JK
3347
3348 status = beiscsi_create_eqs(phba, phwi_context);
6733b39a 3349 if (status != 0) {
99bc5d55
JSJ
3350 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3351 "BM_%d : EQ not created\n");
6733b39a
JK
3352 goto error;
3353 }
3354
bfead3b2
JK
3355 status = be_mcc_queues_create(phba, phwi_context);
3356 if (status != 0)
3357 goto error;
3358
3359 status = mgmt_check_supported_fw(ctrl, phba);
6733b39a 3360 if (status != 0) {
99bc5d55
JSJ
3361 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3362 "BM_%d : Unsupported fw version\n");
6733b39a
JK
3363 goto error;
3364 }
3365
bfead3b2 3366 status = beiscsi_create_cqs(phba, phwi_context);
6733b39a 3367 if (status != 0) {
99bc5d55
JSJ
3368 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3369 "BM_%d : CQ not created\n");
6733b39a
JK
3370 goto error;
3371 }
3372
3373 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3374 def_pdu_ring_sz);
3375 if (status != 0) {
99bc5d55
JSJ
3376 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3377 "BM_%d : Default Header not created\n");
6733b39a
JK
3378 goto error;
3379 }
3380
3381 status = beiscsi_create_def_data(phba, phwi_context,
3382 phwi_ctrlr, def_pdu_ring_sz);
3383 if (status != 0) {
99bc5d55
JSJ
3384 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3385 "BM_%d : Default Data not created\n");
6733b39a
JK
3386 goto error;
3387 }
3388
3389 status = beiscsi_post_pages(phba);
3390 if (status != 0) {
99bc5d55
JSJ
3391 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3392 "BM_%d : Post SGL Pages Failed\n");
6733b39a
JK
3393 goto error;
3394 }
3395
3396 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3397 if (status != 0) {
99bc5d55
JSJ
3398 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3399 "BM_%d : WRB Rings not created\n");
6733b39a
JK
3400 goto error;
3401 }
3402
99bc5d55
JSJ
3403 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3404 "BM_%d : hwi_init_port success\n");
6733b39a
JK
3405 return 0;
3406
3407error:
99bc5d55
JSJ
3408 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3409 "BM_%d : hwi_init_port failed");
6733b39a 3410 hwi_cleanup(phba);
a49e06d5 3411 return status;
6733b39a
JK
3412}
3413
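/**
 * hwi_init_controller()- Initialize the HWI layer
 * @phba: ptr to the priv structure
 *
 * Picks up the HWI context from HWI_MEM_ADDN_CONTEXT, initializes the
 * iSCSI templates, WRB handles and async PDU context, then brings up
 * the port via hwi_init_port().
 **/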
6733b39a
JK
3414static int hwi_init_controller(struct beiscsi_hba *phba)
3415{
3416 struct hwi_controller *phwi_ctrlr;
3417
3418 phwi_ctrlr = phba->phwi_ctrlr;
3419 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3420 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3421 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
99bc5d55
JSJ
3422 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3423 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3424 phwi_ctrlr->phwi_ctxt);
6733b39a 3425 } else {
99bc5d55
JSJ
3426 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3427 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3428 "than one element.Failing to load\n");
6733b39a
JK
3429 return -ENOMEM;
3430 }
3431
3432 iscsi_init_global_templates(phba);
3ec78271
JK
3433 if (beiscsi_init_wrb_handle(phba))
3434 return -ENOMEM;
3435
6733b39a
JK
3436 hwi_init_async_pdu_ctx(phba);
3437 if (hwi_init_port(phba) != 0) {
99bc5d55
JSJ
3438 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3439 "BM_%d : hwi_init_controller failed\n");
3440
6733b39a
JK
3441 return -ENOMEM;
3442 }
3443 return 0;
3444}
3445
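/**
 * beiscsi_free_mem()- Free the memory allocated for the adapter
 * @phba: ptr to the priv structure
 *
 * Releases every DMA-coherent chunk tracked in phba->init_mem along
 * with the per-descriptor arrays, then frees init_mem and phwi_ctrlr.
 **/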
3446static void beiscsi_free_mem(struct beiscsi_hba *phba)
3447{
3448 struct be_mem_descriptor *mem_descr;
3449 int i, j;
3450
3451 mem_descr = phba->init_mem;
3452 i = 0;
3453 j = 0;
3454 for (i = 0; i < SE_MEM_MAX; i++) {
3455 for (j = mem_descr->num_elements; j > 0; j--) {
3456 pci_free_consistent(phba->pcidev,
3457 mem_descr->mem_array[j - 1].size,
3458 mem_descr->mem_array[j - 1].virtual_address,
457ff3b7
JK
3459 (unsigned long)mem_descr->mem_array[j - 1].
3460 bus_address.u.a64.address);
6733b39a
JK
3461 }
3462 kfree(mem_descr->mem_array);
3463 mem_descr++;
3464 }
3465 kfree(phba->init_mem);
3466 kfree(phba->phwi_ctrlr);
3467}
3468
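/**
 * beiscsi_init_controller()- Allocate and initialize adapter resources
 * @phba: ptr to the priv structure
 *
 * Allocates the driver memory regions and initializes the HWI layer;
 * the memory is freed again if the HWI initialization fails.
 **/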
3469static int beiscsi_init_controller(struct beiscsi_hba *phba)
3470{
3471 int ret = -ENOMEM;
3472
3473 ret = beiscsi_get_memory(phba);
3474 if (ret < 0) {
99bc5d55
JSJ
3475 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3476 "BM_%d : beiscsi_dev_probe -"
3477 "Failed in beiscsi_alloc_memory\n");
6733b39a
JK
3478 return ret;
3479 }
3480
3481 ret = hwi_init_controller(phba);
3482 if (ret)
3483 goto free_init;
99bc5d55
JSJ
3484 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3485 "BM_%d : Return success from beiscsi_init_controller");
3486
6733b39a
JK
3487 return 0;
3488
3489free_init:
3490 beiscsi_free_mem(phba);
a49e06d5 3491 return ret;
6733b39a
JK
3492}
3493
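/**
 * beiscsi_init_sgl_handle()- Set up the IO and eh SGL handle pools
 * @phba: ptr to the priv structure
 *
 * Builds the IO and eh (mgmt) SGL handle arrays from HWI_MEM_SGLH and
 * assigns each handle its iscsi_sge fragment and ICD index from
 * HWI_MEM_SGE, starting at fw_config.iscsi_icd_start.
 **/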
3494static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3495{
3496 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3497 struct sgl_handle *psgl_handle;
3498 struct iscsi_sge *pfrag;
3499 unsigned int arr_index, i, idx;
3500
3501 phba->io_sgl_hndl_avbl = 0;
3502 phba->eh_sgl_hndl_avbl = 0;
bfead3b2 3503
6733b39a
JK
3504 mem_descr_sglh = phba->init_mem;
3505 mem_descr_sglh += HWI_MEM_SGLH;
3506 if (1 == mem_descr_sglh->num_elements) {
3507 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3508 phba->params.ios_per_ctrl,
3509 GFP_KERNEL);
3510 if (!phba->io_sgl_hndl_base) {
99bc5d55
JSJ
3511 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3512 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
3513 return -ENOMEM;
3514 }
3515 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3516 (phba->params.icds_per_ctrl -
3517 phba->params.ios_per_ctrl),
3518 GFP_KERNEL);
3519 if (!phba->eh_sgl_hndl_base) {
3520 kfree(phba->io_sgl_hndl_base);
99bc5d55
JSJ
3521 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3522 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
3523 return -ENOMEM;
3524 }
3525 } else {
99bc5d55
JSJ
3526 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3527 "BM_%d : HWI_MEM_SGLH is more than one element."
3528 "Failing to load\n");
6733b39a
JK
3529 return -ENOMEM;
3530 }
3531
3532 arr_index = 0;
3533 idx = 0;
3534 while (idx < mem_descr_sglh->num_elements) {
3535 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3536
3537 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3538 sizeof(struct sgl_handle)); i++) {
3539 if (arr_index < phba->params.ios_per_ctrl) {
3540 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3541 phba->io_sgl_hndl_avbl++;
3542 arr_index++;
3543 } else {
3544 phba->eh_sgl_hndl_base[arr_index -
3545 phba->params.ios_per_ctrl] =
3546 psgl_handle;
3547 arr_index++;
3548 phba->eh_sgl_hndl_avbl++;
3549 }
3550 psgl_handle++;
3551 }
3552 idx++;
3553 }
99bc5d55
JSJ
3554 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3555 "BM_%d : phba->io_sgl_hndl_avbl=%d"
3556 "phba->eh_sgl_hndl_avbl=%d\n",
3557 phba->io_sgl_hndl_avbl,
3558 phba->eh_sgl_hndl_avbl);
3559
6733b39a
JK
3560 mem_descr_sg = phba->init_mem;
3561 mem_descr_sg += HWI_MEM_SGE;
99bc5d55
JSJ
3562 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3563 "\n BM_%d : mem_descr_sg->num_elements=%d\n",
3564 mem_descr_sg->num_elements);
3565
6733b39a
JK
3566 arr_index = 0;
3567 idx = 0;
3568 while (idx < mem_descr_sg->num_elements) {
3569 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3570
3571 for (i = 0;
3572 i < (mem_descr_sg->mem_array[idx].size) /
3573 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3574 i++) {
3575 if (arr_index < phba->params.ios_per_ctrl)
3576 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3577 else
3578 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3579 phba->params.ios_per_ctrl];
3580 psgl_handle->pfrag = pfrag;
3581 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3582 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3583 pfrag += phba->params.num_sge_per_io;
3584 psgl_handle->sgl_index =
7da50879 3585 phba->fw_config.iscsi_icd_start + arr_index++;
6733b39a
JK
3586 }
3587 idx++;
3588 }
3589 phba->io_sgl_free_index = 0;
3590 phba->io_sgl_alloc_index = 0;
3591 phba->eh_sgl_free_index = 0;
3592 phba->eh_sgl_alloc_index = 0;
3593 return 0;
3594}
3595
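/**
 * hba_setup_cid_tbls()- Allocate the CID and endpoint tables
 * @phba: ptr to the priv structure
 *
 * Allocates the cid_array and ep_array and fills cid_array with the
 * CIDs available to the driver, starting at fw_config.iscsi_cid_start.
 **/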
3596static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3597{
3598 int i, new_cid;
3599
c2462288 3600 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
6733b39a
JK
3601 GFP_KERNEL);
3602 if (!phba->cid_array) {
99bc5d55
JSJ
3603 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3604 "BM_%d : Failed to allocate memory in "
3605 "hba_setup_cid_tbls\n");
6733b39a
JK
3606 return -ENOMEM;
3607 }
c2462288 3608 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
6733b39a
JK
3609 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3610 if (!phba->ep_array) {
99bc5d55
JSJ
3611 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3612 "BM_%d : Failed to allocate memory in "
3613 "hba_setup_cid_tbls\n");
6733b39a
JK
3614 kfree(phba->cid_array);
3615 return -ENOMEM;
3616 }
7da50879 3617 new_cid = phba->fw_config.iscsi_cid_start;
6733b39a
JK
3618 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3619 phba->cid_array[i] = new_cid;
3620 new_cid += 2;
3621 }
3622 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3623 return 0;
3624}
3625
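/**
 * hwi_enable_intr()- Enable interrupts for the adapter
 * @phba: ptr to the priv structure
 *
 * Sets the host interrupt enable bit in the PCI config space if it is
 * not already set and rearms the event queue doorbell(s).
 **/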
238f6b72 3626static void hwi_enable_intr(struct beiscsi_hba *phba)
6733b39a
JK
3627{
3628 struct be_ctrl_info *ctrl = &phba->ctrl;
3629 struct hwi_controller *phwi_ctrlr;
3630 struct hwi_context_memory *phwi_context;
3631 struct be_queue_info *eq;
3632 u8 __iomem *addr;
bfead3b2 3633 u32 reg, i;
6733b39a
JK
3634 u32 enabled;
3635
3636 phwi_ctrlr = phba->phwi_ctrlr;
3637 phwi_context = phwi_ctrlr->phwi_ctxt;
3638
6733b39a
JK
3639 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3640 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3641 reg = ioread32(addr);
6733b39a
JK
3642
3643 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3644 if (!enabled) {
3645 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
99bc5d55
JSJ
3646 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3647 "BM_%d : reg =x%08x addr=%p\n", reg, addr);
6733b39a 3648 iowrite32(reg, addr);
665d6d94
JK
3649 }
3650
3651 if (!phba->msix_enabled) {
3652 eq = &phwi_context->be_eq[0].q;
99bc5d55
JSJ
3653 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3654 "BM_%d : eq->id=%d\n", eq->id);
3655
665d6d94
JK
3656 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3657 } else {
3658 for (i = 0; i <= phba->num_cpus; i++) {
3659 eq = &phwi_context->be_eq[i].q;
99bc5d55
JSJ
3660 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3661 "BM_%d : eq->id=%d\n", eq->id);
bfead3b2
JK
3662 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3663 }
c03af1ae 3664 }
6733b39a
JK
3665}
3666
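/**
 * hwi_disable_intr()- Disable interrupts for the adapter
 * @phba: ptr to the priv structure
 *
 * Clears the host interrupt enable bit if it is currently set.
 **/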
3667static void hwi_disable_intr(struct beiscsi_hba *phba)
3668{
3669 struct be_ctrl_info *ctrl = &phba->ctrl;
3670
3671 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3672 u32 reg = ioread32(addr);
3673
3674 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3675 if (enabled) {
3676 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3677 iowrite32(reg, addr);
3678 } else
99bc5d55
JSJ
3679 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3680 "BM_%d : In hwi_disable_intr, Already Disabled\n");
6733b39a
JK
3681}
3682
9aef4200
JSJ
3683/**
3684 * beiscsi_get_boot_info()- Get the boot session info
3685 * @phba: The device priv structure instance
3686 *
3687 * Get the boot target info and store in driver priv structure
3688 *
3689 * return values
3690 * Success: 0
3691 * Failure: Non-Zero Value
3692 **/
c7acc5b8
JK
3693static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3694{
0e43895e 3695 struct be_cmd_get_session_resp *session_resp;
c7acc5b8
JK
3696 struct be_mcc_wrb *wrb;
3697 struct be_dma_mem nonemb_cmd;
3698 unsigned int tag, wrb_num;
3699 unsigned short status, extd_status;
9aef4200 3700 unsigned int s_handle;
c7acc5b8 3701 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
f457a46f 3702 int ret = -ENOMEM;
c7acc5b8 3703
9aef4200
JSJ
3704 /* Get the session handle of the boot target */
3705 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
3706 if (ret) {
99bc5d55
JSJ
3707 beiscsi_log(phba, KERN_ERR,
3708 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3709 "BM_%d : No boot session\n");
9aef4200 3710 return ret;
c7acc5b8 3711 }
c7acc5b8
JK
3712 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3713 sizeof(*session_resp),
3714 &nonemb_cmd.dma);
3715 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
3716 beiscsi_log(phba, KERN_ERR,
3717 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3718 "BM_%d : Failed to allocate memory for"
3719 "beiscsi_get_session_info\n");
3720
c7acc5b8
JK
3721 return -ENOMEM;
3722 }
3723
3724 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
9aef4200 3725 tag = mgmt_get_session_info(phba, s_handle,
0e43895e 3726 &nonemb_cmd);
c7acc5b8 3727 if (!tag) {
99bc5d55
JSJ
3728 beiscsi_log(phba, KERN_ERR,
3729 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3730 "BM_%d : beiscsi_get_session_info"
3731 " Failed\n");
3732
c7acc5b8
JK
3733 goto boot_freemem;
3734 } else
3735 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3736 phba->ctrl.mcc_numtag[tag]);
3737
3738 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3739 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3740 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3741 if (status || extd_status) {
99bc5d55
JSJ
3742 beiscsi_log(phba, KERN_ERR,
3743 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3744 "BM_%d : beiscsi_get_session_info Failed"
3745 " status = %d extd_status = %d\n",
3746 status, extd_status);
3747
c7acc5b8
JK
3748 free_mcc_tag(&phba->ctrl, tag);
3749 goto boot_freemem;
3750 }
3751 wrb = queue_get_wrb(mccq, wrb_num);
3752 free_mcc_tag(&phba->ctrl, tag);
 3753 session_resp = nonemb_cmd.va;
f457a46f 3754
c7acc5b8
JK
3755 memcpy(&phba->boot_sess, &session_resp->session_info,
3756 sizeof(struct mgmt_session_info));
f457a46f
MC
3757 ret = 0;
3758
c7acc5b8
JK
3759boot_freemem:
3760 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3761 nonemb_cmd.va, nonemb_cmd.dma);
f457a46f
MC
3762 return ret;
3763}
3764
3765static void beiscsi_boot_release(void *data)
3766{
3767 struct beiscsi_hba *phba = data;
3768
3769 scsi_host_put(phba->shost);
3770}
3771
3772static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3773{
3774 struct iscsi_boot_kobj *boot_kobj;
3775
3776 /* get boot info using mgmt cmd */
3777 if (beiscsi_get_boot_info(phba))
3778 /* Try to see if we can carry on without this */
3779 return 0;
3780
3781 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3782 if (!phba->boot_kset)
3783 return -ENOMEM;
3784
3785 /* get a ref because the show function will ref the phba */
3786 if (!scsi_host_get(phba->shost))
3787 goto free_kset;
3788 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3789 beiscsi_show_boot_tgt_info,
3790 beiscsi_tgt_get_attr_visibility,
3791 beiscsi_boot_release);
3792 if (!boot_kobj)
3793 goto put_shost;
3794
3795 if (!scsi_host_get(phba->shost))
3796 goto free_kset;
3797 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3798 beiscsi_show_boot_ini_info,
3799 beiscsi_ini_get_attr_visibility,
3800 beiscsi_boot_release);
3801 if (!boot_kobj)
3802 goto put_shost;
3803
3804 if (!scsi_host_get(phba->shost))
3805 goto free_kset;
3806 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3807 beiscsi_show_boot_eth_info,
3808 beiscsi_eth_get_attr_visibility,
3809 beiscsi_boot_release);
3810 if (!boot_kobj)
3811 goto put_shost;
3812 return 0;
3813
3814put_shost:
3815 scsi_host_put(phba->shost);
3816free_kset:
3817 iscsi_boot_destroy_kset(phba->boot_kset);
c7acc5b8
JK
3818 return -ENOMEM;
3819}
3820
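/**
 * beiscsi_init_port()- Initialize the iSCSI port
 * @phba: ptr to the priv structure
 *
 * Initializes the controller, the SGL handle pools and the CID tables;
 * on failure the HW queues are torn down via hwi_cleanup().
 **/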
6733b39a
JK
3821static int beiscsi_init_port(struct beiscsi_hba *phba)
3822{
3823 int ret;
3824
3825 ret = beiscsi_init_controller(phba);
3826 if (ret < 0) {
99bc5d55
JSJ
3827 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3828 "BM_%d : beiscsi_dev_probe - Failed in"
3829 "beiscsi_init_controller\n");
6733b39a
JK
3830 return ret;
3831 }
3832 ret = beiscsi_init_sgl_handle(phba);
3833 if (ret < 0) {
99bc5d55
JSJ
3834 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3835 "BM_%d : beiscsi_dev_probe - Failed in"
3836 "beiscsi_init_sgl_handle\n");
6733b39a
JK
3837 goto do_cleanup_ctrlr;
3838 }
3839
3840 if (hba_setup_cid_tbls(phba)) {
99bc5d55
JSJ
3841 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3842 "BM_%d : Failed in hba_setup_cid_tbls\n");
6733b39a
JK
3843 kfree(phba->io_sgl_hndl_base);
3844 kfree(phba->eh_sgl_hndl_base);
3845 goto do_cleanup_ctrlr;
3846 }
3847
3848 return ret;
3849
3850do_cleanup_ctrlr:
3851 hwi_cleanup(phba);
3852 return ret;
3853}
3854
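/**
 * hwi_purge_eq()- Drain the event queues
 * @phba: ptr to the priv structure
 *
 * Consumes any valid entries left on the event queues and acknowledges
 * them through the EQ doorbell.
 **/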
3855static void hwi_purge_eq(struct beiscsi_hba *phba)
3856{
3857 struct hwi_controller *phwi_ctrlr;
3858 struct hwi_context_memory *phwi_context;
3859 struct be_queue_info *eq;
3860 struct be_eq_entry *eqe = NULL;
bfead3b2 3861 int i, eq_msix;
756d29c8 3862 unsigned int num_processed;
6733b39a
JK
3863
3864 phwi_ctrlr = phba->phwi_ctrlr;
3865 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3866 if (phba->msix_enabled)
3867 eq_msix = 1;
3868 else
3869 eq_msix = 0;
6733b39a 3870
bfead3b2
JK
3871 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3872 eq = &phwi_context->be_eq[i].q;
6733b39a 3873 eqe = queue_tail_node(eq);
756d29c8 3874 num_processed = 0;
bfead3b2
JK
3875 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3876 & EQE_VALID_MASK) {
3877 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3878 queue_tail_inc(eq);
3879 eqe = queue_tail_node(eq);
756d29c8 3880 num_processed++;
bfead3b2 3881 }
756d29c8
JK
3882
3883 if (num_processed)
3884 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
6733b39a
JK
3885 }
3886}
3887
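/**
 * beiscsi_clean_port()- Clean up the iSCSI port
 * @phba: ptr to the priv structure
 *
 * Requests the FW connection cleanup, drains the event queues, tears
 * down the HW queues and frees the SGL handle, CID and endpoint arrays.
 **/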
3888static void beiscsi_clean_port(struct beiscsi_hba *phba)
3889{
03a12310 3890 int mgmt_status;
6733b39a
JK
3891
3892 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3893 if (mgmt_status)
99bc5d55
JSJ
3894 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3895 "BM_%d : mgmt_epfw_cleanup FAILED\n");
756d29c8 3896
6733b39a 3897 hwi_purge_eq(phba);
756d29c8 3898 hwi_cleanup(phba);
6733b39a
JK
3899 kfree(phba->io_sgl_hndl_base);
3900 kfree(phba->eh_sgl_hndl_base);
3901 kfree(phba->cid_array);
3902 kfree(phba->ep_array);
3903}
3904
d629c471
JSJ
3905/**
3906 * beiscsi_cleanup_task()- Free driver resources of the task
3907 * @task: ptr to the iscsi task
3908 *
3909 **/
1282ab76
MC
3910static void beiscsi_cleanup_task(struct iscsi_task *task)
3911{
3912 struct beiscsi_io_task *io_task = task->dd_data;
3913 struct iscsi_conn *conn = task->conn;
3914 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3915 struct beiscsi_hba *phba = beiscsi_conn->phba;
3916 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3917 struct hwi_wrb_context *pwrb_context;
3918 struct hwi_controller *phwi_ctrlr;
3919
3920 phwi_ctrlr = phba->phwi_ctrlr;
3921 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3922 - phba->fw_config.iscsi_cid_start];
3923
3924 if (io_task->cmd_bhs) {
3925 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3926 io_task->bhs_pa.u.a64.address);
3927 io_task->cmd_bhs = NULL;
3928 }
3929
3930 if (task->sc) {
3931 if (io_task->pwrb_handle) {
3932 free_wrb_handle(phba, pwrb_context,
3933 io_task->pwrb_handle);
3934 io_task->pwrb_handle = NULL;
3935 }
3936
3937 if (io_task->psgl_handle) {
3938 spin_lock(&phba->io_sgl_lock);
3939 free_io_sgl_handle(phba, io_task->psgl_handle);
3940 spin_unlock(&phba->io_sgl_lock);
3941 io_task->psgl_handle = NULL;
3942 }
3943 } else {
3944 if (!beiscsi_conn->login_in_progress) {
3945 if (io_task->pwrb_handle) {
3946 free_wrb_handle(phba, pwrb_context,
3947 io_task->pwrb_handle);
3948 io_task->pwrb_handle = NULL;
3949 }
3950 if (io_task->psgl_handle) {
3951 spin_lock(&phba->mgmt_sgl_lock);
3952 free_mgmt_sgl_handle(phba,
3953 io_task->psgl_handle);
3954 spin_unlock(&phba->mgmt_sgl_lock);
3955 io_task->psgl_handle = NULL;
3956 }
d629c471
JSJ
3957 if (io_task->mtask_addr) {
3958 pci_unmap_single(phba->pcidev,
3959 io_task->mtask_addr,
3960 io_task->mtask_data_count,
3961 PCI_DMA_TODEVICE);
3962 io_task->mtask_addr = 0;
3963 }
1282ab76
MC
3964 }
3965 }
3966}
3967
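/**
 * beiscsi_offload_connection()- Offload a connection to the adapter
 * @beiscsi_conn: ptr to the driver connection
 * @params: negotiated offload parameters
 *
 * Frees the login task resources, then builds a target context update
 * WRB with the negotiated parameters and posts it through the TX
 * doorbell.
 **/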
6733b39a
JK
3968void
3969beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3970 struct beiscsi_offload_params *params)
3971{
3972 struct wrb_handle *pwrb_handle;
3973 struct iscsi_target_context_update_wrb *pwrb = NULL;
3974 struct be_mem_descriptor *mem_descr;
3975 struct beiscsi_hba *phba = beiscsi_conn->phba;
1282ab76
MC
3976 struct iscsi_task *task = beiscsi_conn->task;
3977 struct iscsi_session *session = task->conn->session;
6733b39a
JK
3978 u32 doorbell = 0;
3979
3980 /*
3981 * We can always use 0 here because it is reserved by libiscsi for
3982 * login/startup related tasks.
3983 */
1282ab76
MC
3984 beiscsi_conn->login_in_progress = 0;
3985 spin_lock_bh(&session->lock);
3986 beiscsi_cleanup_task(task);
3987 spin_unlock_bh(&session->lock);
3988
7da50879 3989 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
d5431488 3990 phba->fw_config.iscsi_cid_start));
6733b39a
JK
3991 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3992 memset(pwrb, 0, sizeof(*pwrb));
3993 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3994 max_burst_length, pwrb, params->dw[offsetof
3995 (struct amap_beiscsi_offload_params,
3996 max_burst_length) / 32]);
3997 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3998 max_send_data_segment_length, pwrb,
3999 params->dw[offsetof(struct amap_beiscsi_offload_params,
4000 max_send_data_segment_length) / 32]);
4001 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4002 first_burst_length,
4003 pwrb,
4004 params->dw[offsetof(struct amap_beiscsi_offload_params,
4005 first_burst_length) / 32]);
4006
4007 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
4008 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4009 erl) / 32] & OFFLD_PARAMS_ERL));
4010 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
4011 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4012 dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
4013 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
4014 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4015 hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
4016 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
4017 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4018 ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
4019 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
4020 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4021 imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
4022 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
4023 pwrb,
4024 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4025 exp_statsn) / 32] + 1));
4026 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
4027 0x7);
4028 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
4029 pwrb, pwrb_handle->wrb_index);
4030 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
4031 pwrb, pwrb_handle->nxt_wrb_index);
4032 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4033 session_state, pwrb, 0);
4034 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
4035 pwrb, 1);
4036 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
4037 pwrb, 0);
4038 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
4039 0);
4040
4041 mem_descr = phba->init_mem;
4042 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
4043
4044 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4045 pad_buffer_addr_hi, pwrb,
4046 mem_descr->mem_array[0].bus_address.u.a32.address_hi);
4047 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4048 pad_buffer_addr_lo, pwrb,
4049 mem_descr->mem_array[0].bus_address.u.a32.address_lo);
4050
4051 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
4052
4053 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4054 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
bfead3b2 4055 << DB_DEF_PDU_WRB_INDEX_SHIFT;
6733b39a
JK
4056 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4057
4058 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4059}
4060
4061static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4062 int *index, int *age)
4063{
bfead3b2 4064 *index = (int)itt;
6733b39a
JK
4065 if (age)
4066 *age = conn->session->age;
4067}
4068
4069/**
4070 * beiscsi_alloc_pdu - allocates pdu and related resources
4071 * @task: libiscsi task
4072 * @opcode: opcode of pdu for task
4073 *
4074 * This is called with the session lock held. It will allocate
4075 * the wrb and sgl if needed for the command. And it will prep
4076 * the pdu's itt. beiscsi_parse_pdu will later translate
4077 * the pdu itt to the libiscsi task itt.
4078 */
4079static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4080{
4081 struct beiscsi_io_task *io_task = task->dd_data;
4082 struct iscsi_conn *conn = task->conn;
4083 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4084 struct beiscsi_hba *phba = beiscsi_conn->phba;
4085 struct hwi_wrb_context *pwrb_context;
4086 struct hwi_controller *phwi_ctrlr;
4087 itt_t itt;
2afc95bf
JK
4088 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4089 dma_addr_t paddr;
6733b39a 4090
2afc95bf 4091 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
bc7accec 4092 GFP_ATOMIC, &paddr);
2afc95bf
JK
4093 if (!io_task->cmd_bhs)
4094 return -ENOMEM;
2afc95bf 4095 io_task->bhs_pa.u.a64.address = paddr;
bfead3b2 4096 io_task->libiscsi_itt = (itt_t)task->itt;
6733b39a
JK
4097 io_task->conn = beiscsi_conn;
4098
4099 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4100 task->hdr_max = sizeof(struct be_cmd_bhs);
d2cecf0d 4101 io_task->psgl_handle = NULL;
3ec78271 4102 io_task->pwrb_handle = NULL;
6733b39a
JK
4103
4104 if (task->sc) {
4105 spin_lock(&phba->io_sgl_lock);
4106 io_task->psgl_handle = alloc_io_sgl_handle(phba);
4107 spin_unlock(&phba->io_sgl_lock);
8359c79b
JSJ
4108 if (!io_task->psgl_handle) {
4109 beiscsi_log(phba, KERN_ERR,
4110 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4111 "BM_%d : Alloc of IO_SGL_ICD Failed"
4112 "for the CID : %d\n",
4113 beiscsi_conn->beiscsi_conn_cid);
2afc95bf 4114 goto free_hndls;
8359c79b 4115 }
d2cecf0d
JK
4116 io_task->pwrb_handle = alloc_wrb_handle(phba,
4117 beiscsi_conn->beiscsi_conn_cid -
4118 phba->fw_config.iscsi_cid_start);
8359c79b
JSJ
4119 if (!io_task->pwrb_handle) {
4120 beiscsi_log(phba, KERN_ERR,
4121 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4122 "BM_%d : Alloc of WRB_HANDLE Failed"
4123 "for the CID : %d\n",
4124 beiscsi_conn->beiscsi_conn_cid);
d2cecf0d 4125 goto free_io_hndls;
8359c79b 4126 }
6733b39a
JK
4127 } else {
4128 io_task->scsi_cmnd = NULL;
d7aea67b 4129 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
6733b39a
JK
4130 if (!beiscsi_conn->login_in_progress) {
4131 spin_lock(&phba->mgmt_sgl_lock);
4132 io_task->psgl_handle = (struct sgl_handle *)
4133 alloc_mgmt_sgl_handle(phba);
4134 spin_unlock(&phba->mgmt_sgl_lock);
8359c79b
JSJ
4135 if (!io_task->psgl_handle) {
4136 beiscsi_log(phba, KERN_ERR,
4137 BEISCSI_LOG_IO |
4138 BEISCSI_LOG_CONFIG,
4139 "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4140 "for the CID : %d\n",
4141 beiscsi_conn->
4142 beiscsi_conn_cid);
2afc95bf 4143 goto free_hndls;
8359c79b 4144 }
2afc95bf 4145
6733b39a
JK
4146 beiscsi_conn->login_in_progress = 1;
4147 beiscsi_conn->plogin_sgl_handle =
4148 io_task->psgl_handle;
d2cecf0d
JK
4149 io_task->pwrb_handle =
4150 alloc_wrb_handle(phba,
4151 beiscsi_conn->beiscsi_conn_cid -
4152 phba->fw_config.iscsi_cid_start);
8359c79b
JSJ
4153 if (!io_task->pwrb_handle) {
4154 beiscsi_log(phba, KERN_ERR,
4155 BEISCSI_LOG_IO |
4156 BEISCSI_LOG_CONFIG,
4157 "BM_%d : Alloc of WRB_HANDLE Failed"
4158 "for the CID : %d\n",
4159 beiscsi_conn->
4160 beiscsi_conn_cid);
4161 goto free_mgmt_hndls;
4162 }
d2cecf0d
JK
4163 beiscsi_conn->plogin_wrb_handle =
4164 io_task->pwrb_handle;
4165
6733b39a
JK
4166 } else {
4167 io_task->psgl_handle =
4168 beiscsi_conn->plogin_sgl_handle;
d2cecf0d
JK
4169 io_task->pwrb_handle =
4170 beiscsi_conn->plogin_wrb_handle;
6733b39a 4171 }
1282ab76 4172 beiscsi_conn->task = task;
6733b39a
JK
4173 } else {
4174 spin_lock(&phba->mgmt_sgl_lock);
4175 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4176 spin_unlock(&phba->mgmt_sgl_lock);
8359c79b
JSJ
4177 if (!io_task->psgl_handle) {
4178 beiscsi_log(phba, KERN_ERR,
4179 BEISCSI_LOG_IO |
4180 BEISCSI_LOG_CONFIG,
4181 "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4182 "for the CID : %d\n",
4183 beiscsi_conn->
4184 beiscsi_conn_cid);
2afc95bf 4185 goto free_hndls;
8359c79b 4186 }
d2cecf0d
JK
4187 io_task->pwrb_handle =
4188 alloc_wrb_handle(phba,
4189 beiscsi_conn->beiscsi_conn_cid -
4190 phba->fw_config.iscsi_cid_start);
8359c79b
JSJ
4191 if (!io_task->pwrb_handle) {
4192 beiscsi_log(phba, KERN_ERR,
4193 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4194 "BM_%d : Alloc of WRB_HANDLE Failed"
4195 "for the CID : %d\n",
4196 beiscsi_conn->beiscsi_conn_cid);
d2cecf0d 4197 goto free_mgmt_hndls;
8359c79b 4198 }
d2cecf0d 4199
6733b39a
JK
4200 }
4201 }
bfead3b2
JK
4202 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4203 wrb_index << 16) | (unsigned int)
4204 (io_task->psgl_handle->sgl_index));
32951dd8 4205 io_task->pwrb_handle->pio_handle = task;
bfead3b2 4206
6733b39a
JK
4207 io_task->cmd_bhs->iscsi_hdr.itt = itt;
4208 return 0;
2afc95bf 4209
d2cecf0d
JK
4210free_io_hndls:
4211 spin_lock(&phba->io_sgl_lock);
4212 free_io_sgl_handle(phba, io_task->psgl_handle);
4213 spin_unlock(&phba->io_sgl_lock);
4214 goto free_hndls;
4215free_mgmt_hndls:
4216 spin_lock(&phba->mgmt_sgl_lock);
4217 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4218 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
4219free_hndls:
4220 phwi_ctrlr = phba->phwi_ctrlr;
7da50879
JK
4221 pwrb_context = &phwi_ctrlr->wrb_context[
4222 beiscsi_conn->beiscsi_conn_cid -
4223 phba->fw_config.iscsi_cid_start];
d2cecf0d
JK
4224 if (io_task->pwrb_handle)
4225 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2afc95bf
JK
4226 io_task->pwrb_handle = NULL;
4227 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4228 io_task->bhs_pa.u.a64.address);
1282ab76 4229 io_task->cmd_bhs = NULL;
2afc95bf 4230 return -ENOMEM;
6733b39a
JK
4231}
4232
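/**
 * beiscsi_iotask()- Post a SCSI command to the adapter
 * @task: libiscsi task for the command
 * @sg: mapped scatterlist
 * @num_sg: number of scatterlist entries
 * @xferlen: total transfer length
 * @writedir: 1 for a write command, 0 for a read
 *
 * Fills in the iSCSI WRB for the command, writes the SGL and rings the
 * TX doorbell.
 **/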
6733b39a
JK
4233static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4234 unsigned int num_sg, unsigned int xferlen,
4235 unsigned int writedir)
4236{
4237
4238 struct beiscsi_io_task *io_task = task->dd_data;
4239 struct iscsi_conn *conn = task->conn;
4240 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4241 struct beiscsi_hba *phba = beiscsi_conn->phba;
4242 struct iscsi_wrb *pwrb = NULL;
4243 unsigned int doorbell = 0;
4244
4245 pwrb = io_task->pwrb_handle->pwrb;
6733b39a
JK
4246 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4247 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4248
4249 if (writedir) {
32951dd8
JK
4250 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4251 INI_WR_CMD);
6733b39a 4252 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
6733b39a 4253 } else {
32951dd8
JK
4254 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4255 INI_RD_CMD);
6733b39a
JK
4256 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4257 }
6733b39a
JK
4258
4259 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
dc63aac6
JK
4260 cpu_to_be16(*(unsigned short *)
4261 &io_task->cmd_bhs->iscsi_hdr.lun));
6733b39a
JK
4262 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4263 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4264 io_task->pwrb_handle->wrb_index);
4265 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4266 be32_to_cpu(task->cmdsn));
4267 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4268 io_task->psgl_handle->sgl_index);
4269
4270 hwi_write_sgl(pwrb, sg, num_sg, io_task);
4271
4272 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4273 io_task->pwrb_handle->nxt_wrb_index);
4274 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4275
4276 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4277 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4278 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4279 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4280
4281 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4282 return 0;
4283}
4284
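/**
 * beiscsi_mtask()- Post a management task to the adapter
 * @task: libiscsi task (login, nop-out, text, TMF or logout)
 *
 * Builds the WRB for the management opcode and rings the TX doorbell.
 * Unsupported opcodes are rejected with -EINVAL.
 **/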
4285static int beiscsi_mtask(struct iscsi_task *task)
4286{
dafab8e0 4287 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
4288 struct iscsi_conn *conn = task->conn;
4289 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4290 struct beiscsi_hba *phba = beiscsi_conn->phba;
4291 struct iscsi_wrb *pwrb = NULL;
4292 unsigned int doorbell = 0;
dafab8e0 4293 unsigned int cid;
6733b39a 4294
bfead3b2 4295 cid = beiscsi_conn->beiscsi_conn_cid;
6733b39a 4296 pwrb = io_task->pwrb_handle->pwrb;
caf818f1 4297 memset(pwrb, 0, sizeof(*pwrb));
6733b39a
JK
4298 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4299 be32_to_cpu(task->cmdsn));
4300 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4301 io_task->pwrb_handle->wrb_index);
4302 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4303 io_task->psgl_handle->sgl_index);
dafab8e0 4304
6733b39a
JK
4305 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4306 case ISCSI_OP_LOGIN:
32951dd8
JK
4307 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4308 TGT_DM_CMD);
6733b39a
JK
4309 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4310 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4311 hwi_write_buffer(pwrb, task);
4312 break;
4313 case ISCSI_OP_NOOP_OUT:
1390b01b
JK
4314 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4315 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4316 TGT_DM_CMD);
4317 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4318 pwrb, 0);
685e16fd 4319 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
1390b01b
JK
4320 } else {
4321 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4322 INI_RD_CMD);
685e16fd 4323 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
1390b01b 4324 }
6733b39a
JK
4325 hwi_write_buffer(pwrb, task);
4326 break;
4327 case ISCSI_OP_TEXT:
32951dd8 4328 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
b30c6dab 4329 TGT_DM_CMD);
0ecb0b45 4330 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
6733b39a
JK
4331 hwi_write_buffer(pwrb, task);
4332 break;
4333 case ISCSI_OP_SCSI_TMFUNC:
32951dd8
JK
4334 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4335 INI_TMF_CMD);
6733b39a
JK
4336 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4337 hwi_write_buffer(pwrb, task);
4338 break;
4339 case ISCSI_OP_LOGOUT:
4340 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4341 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
dafab8e0 4342 HWH_TYPE_LOGOUT);
6733b39a
JK
4343 hwi_write_buffer(pwrb, task);
4344 break;
4345
4346 default:
99bc5d55
JSJ
4347 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4348 "BM_%d : opcode =%d Not supported\n",
4349 task->hdr->opcode & ISCSI_OPCODE_MASK);
4350
6733b39a
JK
4351 return -EINVAL;
4352 }
4353
4354 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
51a46250 4355 task->data_count);
6733b39a
JK
4356 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4357 io_task->pwrb_handle->nxt_wrb_index);
4358 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4359
bfead3b2 4360 doorbell |= cid & DB_WRB_POST_CID_MASK;
32951dd8 4361 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4362 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4363 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4364 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4365 return 0;
4366}
4367
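/**
 * beiscsi_task_xmit()- Transmit path entry point from libiscsi
 * @task: libiscsi task to transmit
 *
 * DMA-maps the SCSI command and hands it to beiscsi_iotask(); tasks
 * without a SCSI command are sent through beiscsi_mtask().
 **/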
4368static int beiscsi_task_xmit(struct iscsi_task *task)
4369{
6733b39a
JK
4370 struct beiscsi_io_task *io_task = task->dd_data;
4371 struct scsi_cmnd *sc = task->sc;
6733b39a
JK
4372 struct scatterlist *sg;
4373 int num_sg;
4374 unsigned int writedir = 0, xferlen = 0;
4375
6733b39a
JK
4376 if (!sc)
4377 return beiscsi_mtask(task);
4378
4379 io_task->scsi_cmnd = sc;
4380 num_sg = scsi_dma_map(sc);
4381 if (num_sg < 0) {
99bc5d55
JSJ
4382 struct iscsi_conn *conn = task->conn;
4383 struct beiscsi_hba *phba = NULL;
4384
4385 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
4386 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
4387 "BM_%d : scsi_dma_map Failed\n");
4388
6733b39a
JK
4389 return num_sg;
4390 }
6733b39a
JK
4391 xferlen = scsi_bufflen(sc);
4392 sg = scsi_sglist(sc);
99bc5d55 4393 if (sc->sc_data_direction == DMA_TO_DEVICE)
6733b39a 4394 writedir = 1;
99bc5d55 4395 else
6733b39a 4396 writedir = 0;
99bc5d55 4397
6733b39a
JK
4398 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4399}
4400
ffce3e2e
JK
4401/**
4402 * beiscsi_bsg_request - handle bsg request from ISCSI transport
4403 * @job: job to handle
4404 */
4405static int beiscsi_bsg_request(struct bsg_job *job)
4406{
4407 struct Scsi_Host *shost;
4408 struct beiscsi_hba *phba;
4409 struct iscsi_bsg_request *bsg_req = job->request;
4410 int rc = -EINVAL;
4411 unsigned int tag;
4412 struct be_dma_mem nonemb_cmd;
4413 struct be_cmd_resp_hdr *resp;
4414 struct iscsi_bsg_reply *bsg_reply = job->reply;
4415 unsigned short status, extd_status;
4416
4417 shost = iscsi_job_to_shost(job);
4418 phba = iscsi_host_priv(shost);
4419
4420 switch (bsg_req->msgcode) {
4421 case ISCSI_BSG_HST_VENDOR:
4422 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4423 job->request_payload.payload_len,
4424 &nonemb_cmd.dma);
4425 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
4426 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4427 "BM_%d : Failed to allocate memory for "
4428 "beiscsi_bsg_request\n");
8359c79b 4429 return -ENOMEM;
ffce3e2e
JK
4430 }
4431 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4432 &nonemb_cmd);
4433 if (!tag) {
99bc5d55 4434 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
8359c79b 4435 "BM_%d : MBX Tag Allocation Failed\n");
99bc5d55 4436
ffce3e2e
JK
4437 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4438 nonemb_cmd.va, nonemb_cmd.dma);
4439 return -EAGAIN;
4440 } else
4441 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
4442 phba->ctrl.mcc_numtag[tag]);
4443 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
4444 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
4445 free_mcc_tag(&phba->ctrl, tag);
4446 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4447 sg_copy_from_buffer(job->reply_payload.sg_list,
4448 job->reply_payload.sg_cnt,
4449 nonemb_cmd.va, (resp->response_length
4450 + sizeof(*resp)));
4451 bsg_reply->reply_payload_rcv_len = resp->response_length;
4452 bsg_reply->result = status;
4453 bsg_job_done(job, bsg_reply->result,
4454 bsg_reply->reply_payload_rcv_len);
4455 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4456 nonemb_cmd.va, nonemb_cmd.dma);
4457 if (status || extd_status) {
99bc5d55 4458 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
8359c79b 4459 "BM_%d : MBX Cmd Failed"
99bc5d55
JSJ
4460 " status = %d extd_status = %d\n",
4461 status, extd_status);
4462
ffce3e2e 4463 return -EIO;
8359c79b
JSJ
4464 } else {
4465 rc = 0;
ffce3e2e
JK
4466 }
4467 break;
4468
4469 default:
99bc5d55
JSJ
4470 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4471 "BM_%d : Unsupported bsg command: 0x%x\n",
4472 bsg_req->msgcode);
ffce3e2e
JK
4473 break;
4474 }
4475
4476 return rc;
4477}
4478
99bc5d55
JSJ
4479void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4480{
4481 /* Set the logging parameter */
4482 beiscsi_log_enable_init(phba, beiscsi_log_enable);
4483}
4484
4d4d1ef8
JSJ
 4485/**
4486 * beiscsi_quiesce()- Cleanup Driver resources
4487 * @phba: Instance Priv structure
4488 *
4489 * Free the OS and HW resources held by the driver
4490 **/
25602c97 4491static void beiscsi_quiesce(struct beiscsi_hba *phba)
6733b39a 4492{
bfead3b2
JK
4493 struct hwi_controller *phwi_ctrlr;
4494 struct hwi_context_memory *phwi_context;
4495 struct be_eq_obj *pbe_eq;
4496 unsigned int i, msix_vec;
6733b39a 4497
bfead3b2
JK
4498 phwi_ctrlr = phba->phwi_ctrlr;
4499 phwi_context = phwi_ctrlr->phwi_ctxt;
6733b39a 4500 hwi_disable_intr(phba);
bfead3b2
JK
4501 if (phba->msix_enabled) {
4502 for (i = 0; i <= phba->num_cpus; i++) {
4503 msix_vec = phba->msix_entries[i].vector;
4504 free_irq(msix_vec, &phwi_context->be_eq[i]);
8fcfb210 4505 kfree(phba->msi_name[i]);
bfead3b2
JK
4506 }
4507 } else
4508 if (phba->pcidev->irq)
4509 free_irq(phba->pcidev->irq, phba);
4510 pci_disable_msix(phba->pcidev);
6733b39a
JK
4511 destroy_workqueue(phba->wq);
4512 if (blk_iopoll_enabled)
bfead3b2
JK
4513 for (i = 0; i < phba->num_cpus; i++) {
4514 pbe_eq = &phwi_context->be_eq[i];
4515 blk_iopoll_disable(&pbe_eq->iopoll);
4516 }
6733b39a
JK
4517
4518 beiscsi_clean_port(phba);
4519 beiscsi_free_mem(phba);
e9b91193 4520
6733b39a
JK
4521 beiscsi_unmap_pci_function(phba);
4522 pci_free_consistent(phba->pcidev,
4523 phba->ctrl.mbox_mem_alloced.size,
4524 phba->ctrl.mbox_mem_alloced.va,
4525 phba->ctrl.mbox_mem_alloced.dma);
25602c97
JK
4526}
4527
4528static void beiscsi_remove(struct pci_dev *pcidev)
4529{
4530
4531 struct beiscsi_hba *phba = NULL;
4532
4533 phba = pci_get_drvdata(pcidev);
4534 if (!phba) {
4535 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4536 return;
4537 }
4538
0e43895e 4539 beiscsi_destroy_def_ifaces(phba);
25602c97 4540 beiscsi_quiesce(phba);
9d045163 4541 iscsi_boot_destroy_kset(phba->boot_kset);
6733b39a
JK
4542 iscsi_host_remove(phba->shost);
4543 pci_dev_put(phba->pcidev);
4544 iscsi_host_free(phba->shost);
8dce69ff 4545 pci_disable_device(pcidev);
6733b39a
JK
4546}
4547
25602c97
JK
4548static void beiscsi_shutdown(struct pci_dev *pcidev)
4549{
4550
4551 struct beiscsi_hba *phba = NULL;
4552
4553 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4554 if (!phba) {
4555 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4556 return;
4557 }
4558
4559 beiscsi_quiesce(phba);
8dce69ff 4560 pci_disable_device(pcidev);
25602c97
JK
4561}
4562
bfead3b2
JK
4563static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4564{
4565 int i, status;
4566
4567 for (i = 0; i <= phba->num_cpus; i++)
4568 phba->msix_entries[i].entry = i;
4569
4570 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4571 (phba->num_cpus + 1));
4572 if (!status)
4573 phba->msix_enabled = true;
4574
4575 return;
4576}
4577
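/**
 * beiscsi_dev_probe()- PCI probe entry point for the driver
 * @pcidev: PCI device to attach
 * @id: matching entry from the PCI id table
 *
 * Enables the device, allocates the host, sets up MSIx and the EQ
 * count, initializes the control path and the port, registers the
 * IRQs and iopoll handlers, enables interrupts and exports the boot
 * and iface information.
 **/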
6733b39a
JK
4578static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4579 const struct pci_device_id *id)
4580{
4581 struct beiscsi_hba *phba = NULL;
bfead3b2
JK
4582 struct hwi_controller *phwi_ctrlr;
4583 struct hwi_context_memory *phwi_context;
4584 struct be_eq_obj *pbe_eq;
107dfcba 4585 int ret, i;
6733b39a
JK
4586
4587 ret = beiscsi_enable_pci(pcidev);
4588 if (ret < 0) {
99bc5d55
JSJ
4589 dev_err(&pcidev->dev,
4590 "beiscsi_dev_probe - Failed to enable pci device\n");
6733b39a
JK
4591 return ret;
4592 }
4593
4594 phba = beiscsi_hba_alloc(pcidev);
4595 if (!phba) {
99bc5d55
JSJ
4596 dev_err(&pcidev->dev,
4597 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
6733b39a
JK
4598 goto disable_pci;
4599 }
4600
99bc5d55
JSJ
 4601 /* Initialize Driver configuration Parameters */
4602 beiscsi_hba_attrs_init(phba);
4603
f98c96b0
JK
4604 switch (pcidev->device) {
4605 case BE_DEVICE_ID1:
4606 case OC_DEVICE_ID1:
4607 case OC_DEVICE_ID2:
4608 phba->generation = BE_GEN2;
4609 break;
4610 case BE_DEVICE_ID2:
4611 case OC_DEVICE_ID3:
4612 phba->generation = BE_GEN3;
4613 break;
4614 default:
4615 phba->generation = 0;
4616 }
4617
bfead3b2 4618 if (enable_msix)
107dfcba 4619 find_num_cpus(phba);
bfead3b2 4620 else
107dfcba
JSJ
4621 phba->num_cpus = 1;
4622
99bc5d55
JSJ
4623 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4624 "BM_%d : num_cpus = %d\n",
4625 phba->num_cpus);
bfead3b2 4626
b547f2d6 4627 if (enable_msix) {
bfead3b2 4628 beiscsi_msix_enable(phba);
b547f2d6
JK
4629 if (!phba->msix_enabled)
4630 phba->num_cpus = 1;
4631 }
6733b39a
JK
4632 ret = be_ctrl_init(phba, pcidev);
4633 if (ret) {
99bc5d55
JSJ
4634 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4635 "BM_%d : beiscsi_dev_probe-"
4636 "Failed in be_ctrl_init\n");
6733b39a
JK
4637 goto hba_free;
4638 }
4639
4d4d1ef8
JSJ
4640 ret = beiscsi_cmd_reset_function(phba);
4641 if (ret) {
4642 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4643 "BM_%d : Reset Failed. Aborting Crashdump\n");
4644 goto hba_free;
4645 }
4646 ret = be_chk_reset_complete(phba);
4647 if (ret) {
4648 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4649 "BM_%d : Failed to get out of reset."
4650 "Aborting Crashdump\n");
4651 goto hba_free;
e9b91193
JK
4652 }
4653
6733b39a
JK
4654 spin_lock_init(&phba->io_sgl_lock);
4655 spin_lock_init(&phba->mgmt_sgl_lock);
4656 spin_lock_init(&phba->isr_lock);
7da50879
JK
4657 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4658 if (ret != 0) {
99bc5d55
JSJ
4659 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4660 "BM_%d : Error getting fw config\n");
7da50879
JK
4661 goto free_port;
4662 }
4663 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
6733b39a 4664 beiscsi_get_params(phba);
aa874f07 4665 phba->shost->can_queue = phba->params.ios_per_ctrl;
6733b39a
JK
4666 ret = beiscsi_init_port(phba);
4667 if (ret < 0) {
99bc5d55
JSJ
4668 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4669 "BM_%d : beiscsi_dev_probe-"
4670 "Failed in beiscsi_init_port\n");
6733b39a
JK
4671 goto free_port;
4672 }
4673
756d29c8
JK
4674 for (i = 0; i < MAX_MCC_CMD ; i++) {
4675 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4676 phba->ctrl.mcc_tag[i] = i + 1;
4677 phba->ctrl.mcc_numtag[i + 1] = 0;
4678 phba->ctrl.mcc_tag_available++;
4679 }
4680
4681 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
4682
72fb46a9 4683 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
6733b39a 4684 phba->shost->host_no);
278274d5 4685 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
6733b39a 4686 if (!phba->wq) {
99bc5d55
JSJ
4687 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4688 "BM_%d : beiscsi_dev_probe-"
4689 "Failed to allocate work queue\n");
6733b39a
JK
4690 goto free_twq;
4691 }
4692
6733b39a 4693
bfead3b2
JK
4694 phwi_ctrlr = phba->phwi_ctrlr;
4695 phwi_context = phwi_ctrlr->phwi_ctxt;
72fb46a9 4696
6733b39a 4697 if (blk_iopoll_enabled) {
bfead3b2
JK
4698 for (i = 0; i < phba->num_cpus; i++) {
4699 pbe_eq = &phwi_context->be_eq[i];
4700 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
4701 be_iopoll);
4702 blk_iopoll_enable(&pbe_eq->iopoll);
4703 }
72fb46a9
JSJ
4704
4705 i = (phba->msix_enabled) ? i : 0;
4706 /* Work item for MCC handling */
4707 pbe_eq = &phwi_context->be_eq[i];
4708 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
4709 } else {
4710 if (phba->msix_enabled) {
4711 for (i = 0; i <= phba->num_cpus; i++) {
4712 pbe_eq = &phwi_context->be_eq[i];
4713 INIT_WORK(&pbe_eq->work_cqs,
4714 beiscsi_process_all_cqs);
4715 }
4716 } else {
4717 pbe_eq = &phwi_context->be_eq[0];
4718 INIT_WORK(&pbe_eq->work_cqs,
4719 beiscsi_process_all_cqs);
4720 }
6733b39a 4721 }
72fb46a9 4722
6733b39a
JK
4723 ret = beiscsi_init_irqs(phba);
4724 if (ret < 0) {
99bc5d55
JSJ
4725 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4726 "BM_%d : beiscsi_dev_probe-"
4727 "Failed to beiscsi_init_irqs\n");
6733b39a
JK
4728 goto free_blkenbld;
4729 }
238f6b72 4730 hwi_enable_intr(phba);
f457a46f
MC
4731
4732 if (beiscsi_setup_boot_info(phba))
4733 /*
4734 * log error but continue, because we may not be using
4735 * iscsi boot.
4736 */
99bc5d55
JSJ
4737 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4738 "BM_%d : Could not set up "
4739 "iSCSI boot info.\n");
f457a46f 4740
0e43895e 4741 beiscsi_create_def_ifaces(phba);
99bc5d55
JSJ
4742 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4743 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
6733b39a
JK
4744 return 0;
4745
6733b39a
JK
4746free_blkenbld:
4747 destroy_workqueue(phba->wq);
4748 if (blk_iopoll_enabled)
bfead3b2
JK
4749 for (i = 0; i < phba->num_cpus; i++) {
4750 pbe_eq = &phwi_context->be_eq[i];
4751 blk_iopoll_disable(&pbe_eq->iopoll);
4752 }
6733b39a
JK
4753free_twq:
4754 beiscsi_clean_port(phba);
4755 beiscsi_free_mem(phba);
4756free_port:
4757 pci_free_consistent(phba->pcidev,
4758 phba->ctrl.mbox_mem_alloced.size,
4759 phba->ctrl.mbox_mem_alloced.va,
4760 phba->ctrl.mbox_mem_alloced.dma);
4761 beiscsi_unmap_pci_function(phba);
4762hba_free:
238f6b72
JK
4763 if (phba->msix_enabled)
4764 pci_disable_msix(phba->pcidev);
6733b39a
JK
4765 iscsi_host_remove(phba->shost);
4766 pci_dev_put(phba->pcidev);
4767 iscsi_host_free(phba->shost);
4768disable_pci:
4769 pci_disable_device(pcidev);
4770 return ret;
4771}
4772
4773struct iscsi_transport beiscsi_iscsi_transport = {
4774 .owner = THIS_MODULE,
4775 .name = DRV_NAME,
9db0fb3a 4776 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
6733b39a 4777 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
6733b39a
JK
4778 .create_session = beiscsi_session_create,
4779 .destroy_session = beiscsi_session_destroy,
4780 .create_conn = beiscsi_conn_create,
4781 .bind_conn = beiscsi_conn_bind,
4782 .destroy_conn = iscsi_conn_teardown,
3128c6c7 4783 .attr_is_visible = be2iscsi_attr_is_visible,
0e43895e
MC
4784 .set_iface_param = be2iscsi_iface_set_param,
4785 .get_iface_param = be2iscsi_iface_get_param,
6733b39a 4786 .set_param = beiscsi_set_param,
c7f7fd5b 4787 .get_conn_param = iscsi_conn_get_param,
6733b39a
JK
4788 .get_session_param = iscsi_session_get_param,
4789 .get_host_param = beiscsi_get_host_param,
4790 .start_conn = beiscsi_conn_start,
fa95d206 4791 .stop_conn = iscsi_conn_stop,
6733b39a
JK
4792 .send_pdu = iscsi_conn_send_pdu,
4793 .xmit_task = beiscsi_task_xmit,
4794 .cleanup_task = beiscsi_cleanup_task,
4795 .alloc_pdu = beiscsi_alloc_pdu,
4796 .parse_pdu_itt = beiscsi_parse_pdu,
4797 .get_stats = beiscsi_conn_get_stats,
c7f7fd5b 4798 .get_ep_param = beiscsi_ep_get_param,
6733b39a
JK
4799 .ep_connect = beiscsi_ep_connect,
4800 .ep_poll = beiscsi_ep_poll,
4801 .ep_disconnect = beiscsi_ep_disconnect,
4802 .session_recovery_timedout = iscsi_session_recovery_timedout,
ffce3e2e 4803 .bsg_request = beiscsi_bsg_request,
6733b39a
JK
4804};
4805
4806static struct pci_driver beiscsi_pci_driver = {
4807 .name = DRV_NAME,
4808 .probe = beiscsi_dev_probe,
4809 .remove = beiscsi_remove,
25602c97 4810 .shutdown = beiscsi_shutdown,
6733b39a
JK
4811 .id_table = beiscsi_pci_id_table
4812};
4813
bfead3b2 4814
6733b39a
JK
4815static int __init beiscsi_module_init(void)
4816{
4817 int ret;
4818
4819 beiscsi_scsi_transport =
4820 iscsi_register_transport(&beiscsi_iscsi_transport);
4821 if (!beiscsi_scsi_transport) {
99bc5d55
JSJ
4822 printk(KERN_ERR
4823 "beiscsi_module_init - Unable to register beiscsi transport.\n");
f55a24f2 4824 return -ENOMEM;
6733b39a 4825 }
99bc5d55
JSJ
4826 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
4827 &beiscsi_iscsi_transport);
6733b39a
JK
4828
4829 ret = pci_register_driver(&beiscsi_pci_driver);
4830 if (ret) {
99bc5d55
JSJ
4831 printk(KERN_ERR
4832 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
6733b39a
JK
4833 goto unregister_iscsi_transport;
4834 }
4835 return 0;
4836
4837unregister_iscsi_transport:
4838 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4839 return ret;
4840}
4841
4842static void __exit beiscsi_module_exit(void)
4843{
4844 pci_unregister_driver(&beiscsi_pci_driver);
4845 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4846}
4847
4848module_init(beiscsi_module_init);
4849module_exit(beiscsi_module_exit);