/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/*
 * Driver version
 */
static char qla4xxx_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * Module parameter information and variables
 */
static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
        " Set to disable exporting boot targets to sysfs.\n"
        "\t\t 0 - Export boot targets\n"
        "\t\t 1 - Do not export boot targets (Default)");

int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
        " Don't reset the HBA for driver recovery.\n"
        "\t\t 0 - It will reset HBA (Default)\n"
        "\t\t 1 - It will NOT reset HBA");

int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
        " Option to enable extended error logging.\n"
        "\t\t 0 - no logging (Default)\n"
        "\t\t 2 - debug logging");

int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
        " Set to enable MSI or MSI-X interrupt mechanism.\n"
        "\t\t 0 = enable INTx interrupt mechanism.\n"
        "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
        "\t\t 2 = enable MSI interrupt mechanism.");

#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
        " Maximum queue depth to report for target devices.\n"
        "\t\t Default: 32.");

static int ql4xqfulltracking = 1;
module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xqfulltracking,
        " Enable or disable dynamic tracking and adjustment of\n"
        "\t\t scsi device queue depth.\n"
        "\t\t 0 - Disable.\n"
        "\t\t 1 - Enable. (Default)");

static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
        " Target Session Recovery Timeout.\n"
        "\t\t Default: 120 sec.");

int ql4xmdcapmask = 0x1F;
module_param(ql4xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql4xmdcapmask,
        " Set the Minidump driver capture mask level.\n"
        "\t\t Default is 0x1F.\n"
        "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");

int ql4xenablemd = 1;
module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xenablemd,
        " Set to enable minidump.\n"
        "\t\t 0 - disable minidump\n"
        "\t\t 1 - enable minidump (Default)");

static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
 * SCSI host template entry points
 */
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);

/*
 * iSCSI template entry points
 */
static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
                                     enum iscsi_param param, char *buf);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
                                  enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
                                  enum iscsi_host_param param, char *buf);
static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
                                   uint32_t len);
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
                                   enum iscsi_param_type param_type,
                                   int param, char *buf);
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
                                                 struct sockaddr *dst_addr,
                                                 int non_blocking);
static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
                                enum iscsi_param param, char *buf);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
                             struct iscsi_cls_conn *cls_conn,
                             uint64_t transport_fd, int is_leading);
static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
                       uint16_t qdepth, uint32_t initial_cmdsn);
static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
static void qla4xxx_task_work(struct work_struct *wdata);
static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
static int qla4xxx_task_xmit(struct iscsi_task *);
static void qla4xxx_task_cleanup(struct iscsi_task *);
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
                                   struct iscsi_stats *stats);
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
                             uint32_t iface_type, uint32_t payload_size,
                             uint32_t pid, struct sockaddr *dst_addr);
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
                                 uint32_t *num_entries, char *buf);
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);

/*
 * SCSI host template entry points
 */
static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
                                      int reason);

static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
        QLA82XX_LEGACY_INTR_CONFIG;

static struct scsi_host_template qla4xxx_driver_template = {
        .module                  = THIS_MODULE,
        .name                    = DRIVER_NAME,
        .proc_name               = DRIVER_NAME,
        .queuecommand            = qla4xxx_queuecommand,

        .eh_abort_handler        = qla4xxx_eh_abort,
        .eh_device_reset_handler = qla4xxx_eh_device_reset,
        .eh_target_reset_handler = qla4xxx_eh_target_reset,
        .eh_host_reset_handler   = qla4xxx_eh_host_reset,
        .eh_timed_out            = qla4xxx_eh_cmd_timed_out,

        .slave_configure         = qla4xxx_slave_configure,
        .slave_alloc             = qla4xxx_slave_alloc,
        .slave_destroy           = qla4xxx_slave_destroy,
        .change_queue_depth      = qla4xxx_change_queue_depth,

        .this_id                 = -1,
        .cmd_per_lun             = 3,
        .use_clustering          = ENABLE_CLUSTERING,
        .sg_tablesize            = SG_ALL,

        .max_sectors             = 0xFFFF,
        .shost_attrs             = qla4xxx_host_attrs,
        .host_reset              = qla4xxx_host_reset,
        .vendor_id               = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};

static struct iscsi_transport qla4xxx_iscsi_transport = {
        .owner                  = THIS_MODULE,
        .name                   = DRIVER_NAME,
        .caps                   = CAP_TEXT_NEGO |
                                  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
                                  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
                                  CAP_MULTI_R2T,
        .attr_is_visible        = ql4_attr_is_visible,
        .create_session         = qla4xxx_session_create,
        .destroy_session        = qla4xxx_session_destroy,
        .start_conn             = qla4xxx_conn_start,
        .create_conn            = qla4xxx_conn_create,
        .bind_conn              = qla4xxx_conn_bind,
        .stop_conn              = iscsi_conn_stop,
        .destroy_conn           = qla4xxx_conn_destroy,
        .set_param              = iscsi_set_param,
        .get_conn_param         = qla4xxx_conn_get_param,
        .get_session_param      = qla4xxx_session_get_param,
        .get_ep_param           = qla4xxx_get_ep_param,
        .ep_connect             = qla4xxx_ep_connect,
        .ep_poll                = qla4xxx_ep_poll,
        .ep_disconnect          = qla4xxx_ep_disconnect,
        .get_stats              = qla4xxx_conn_get_stats,
        .send_pdu               = iscsi_conn_send_pdu,
        .xmit_task              = qla4xxx_task_xmit,
        .cleanup_task           = qla4xxx_task_cleanup,
        .alloc_pdu              = qla4xxx_alloc_pdu,

        .get_host_param         = qla4xxx_host_get_param,
        .set_iface_param        = qla4xxx_iface_set_param,
        .get_iface_param        = qla4xxx_get_iface_param,
        .bsg_request            = qla4xxx_bsg_request,
        .send_ping              = qla4xxx_send_ping,
        .get_chap               = qla4xxx_get_chap_list,
        .delete_chap            = qla4xxx_delete_chap,
};

static struct scsi_transport_template *qla4xxx_scsi_transport;

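/*
 * qla4xxx_send_ping - issue a ping IOCB to the given destination.
 * For IPv4 the destination address is used directly.  For IPv6 a ping via
 * the link-local address is tried first; if that fails, it is retried via
 * the routable IPv6 address of the selected interface (iface_num 0 or 1).
 */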
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
                             uint32_t iface_type, uint32_t payload_size,
                             uint32_t pid, struct sockaddr *dst_addr)
{
        struct scsi_qla_host *ha = to_qla_host(shost);
        struct sockaddr_in *addr;
        struct sockaddr_in6 *addr6;
        uint32_t options = 0;
        uint8_t ipaddr[IPv6_ADDR_LEN];
        int rval;

        memset(ipaddr, 0, IPv6_ADDR_LEN);
        /* IPv4 to IPv4 */
        if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
            (dst_addr->sa_family == AF_INET)) {
                addr = (struct sockaddr_in *)dst_addr;
                memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
                DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
                                  "dest: %pI4\n", __func__,
                                  &ha->ip_config.ip_address, ipaddr));
                rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
                                         ipaddr);
                if (rval)
                        rval = -EINVAL;
        } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
                   (dst_addr->sa_family == AF_INET6)) {
                /* IPv6 to IPv6 */
                addr6 = (struct sockaddr_in6 *)dst_addr;
                memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);

                options |= PING_IPV6_PROTOCOL_ENABLE;

                /* Ping using LinkLocal address */
                if ((iface_num == 0) || (iface_num == 1)) {
                        DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
                                          "src: %pI6 dest: %pI6\n", __func__,
                                          &ha->ip_config.ipv6_link_local_addr,
                                          ipaddr));
                        options |= PING_IPV6_LINKLOCAL_ADDR;
                        rval = qla4xxx_ping_iocb(ha, options, payload_size,
                                                 pid, ipaddr);
                } else {
                        ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
                                   "not supported\n", __func__, iface_num);
                        rval = -ENOSYS;
                        goto exit_send_ping;
                }

                /*
                 * If ping using LinkLocal address fails, try ping using
                 * IPv6 address
                 */
                if (rval != QLA_SUCCESS) {
                        options &= ~PING_IPV6_LINKLOCAL_ADDR;
                        if (iface_num == 0) {
                                options |= PING_IPV6_ADDR0;
                                DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
                                                  "Ping src: %pI6 "
                                                  "dest: %pI6\n", __func__,
                                                  &ha->ip_config.ipv6_addr0,
                                                  ipaddr));
                        } else if (iface_num == 1) {
                                options |= PING_IPV6_ADDR1;
                                DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
                                                  "Ping src: %pI6 "
                                                  "dest: %pI6\n", __func__,
                                                  &ha->ip_config.ipv6_addr1,
                                                  ipaddr));
                        }
                        rval = qla4xxx_ping_iocb(ha, options, payload_size,
                                                 pid, ipaddr);
                        if (rval)
                                rval = -EINVAL;
                }
        } else
                rval = -ENOSYS;
exit_send_ping:
        return rval;
}

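/*
 * ql4_attr_is_visible - report which iSCSI sysfs attributes this driver
 * exposes.  Supported host, session/connection and network parameters are
 * read-only (S_IRUGO); everything else is hidden.
 */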
static umode_t ql4_attr_is_visible(int param_type, int param)
{
        switch (param_type) {
        case ISCSI_HOST_PARAM:
                switch (param) {
                case ISCSI_HOST_PARAM_HWADDRESS:
                case ISCSI_HOST_PARAM_IPADDRESS:
                case ISCSI_HOST_PARAM_INITIATOR_NAME:
                case ISCSI_HOST_PARAM_PORT_STATE:
                case ISCSI_HOST_PARAM_PORT_SPEED:
                        return S_IRUGO;
                default:
                        return 0;
                }
        case ISCSI_PARAM:
                switch (param) {
                case ISCSI_PARAM_PERSISTENT_ADDRESS:
                case ISCSI_PARAM_PERSISTENT_PORT:
                case ISCSI_PARAM_CONN_ADDRESS:
                case ISCSI_PARAM_CONN_PORT:
                case ISCSI_PARAM_TARGET_NAME:
                case ISCSI_PARAM_TPGT:
                case ISCSI_PARAM_TARGET_ALIAS:
                case ISCSI_PARAM_MAX_BURST:
                case ISCSI_PARAM_MAX_R2T:
                case ISCSI_PARAM_FIRST_BURST:
                case ISCSI_PARAM_MAX_RECV_DLENGTH:
                case ISCSI_PARAM_MAX_XMIT_DLENGTH:
                case ISCSI_PARAM_IFACE_NAME:
                case ISCSI_PARAM_CHAP_OUT_IDX:
                case ISCSI_PARAM_CHAP_IN_IDX:
                case ISCSI_PARAM_USERNAME:
                case ISCSI_PARAM_PASSWORD:
                case ISCSI_PARAM_USERNAME_IN:
                case ISCSI_PARAM_PASSWORD_IN:
                        return S_IRUGO;
                default:
                        return 0;
                }
        case ISCSI_NET_PARAM:
                switch (param) {
                case ISCSI_NET_PARAM_IPV4_ADDR:
                case ISCSI_NET_PARAM_IPV4_SUBNET:
                case ISCSI_NET_PARAM_IPV4_GW:
                case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
                case ISCSI_NET_PARAM_IFACE_ENABLE:
                case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
                case ISCSI_NET_PARAM_IPV6_ADDR:
                case ISCSI_NET_PARAM_IPV6_ROUTER:
                case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
                case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
                case ISCSI_NET_PARAM_VLAN_ID:
                case ISCSI_NET_PARAM_VLAN_PRIORITY:
                case ISCSI_NET_PARAM_VLAN_ENABLED:
                case ISCSI_NET_PARAM_MTU:
                case ISCSI_NET_PARAM_PORT:
                        return S_IRUGO;
                default:
                        return 0;
                }
        }

        return 0;
}

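/*
 * qla4xxx_get_chap_list - copy valid CHAP entries, starting at chap_tbl_idx,
 * from the cached CHAP table into the iscsi_chap_rec array supplied in buf.
 * On return, *num_entries holds the number of valid entries actually copied.
 */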
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
                                 uint32_t *num_entries, char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(shost);
        struct ql4_chap_table *chap_table;
        struct iscsi_chap_rec *chap_rec;
        int max_chap_entries = 0;
        int valid_chap_entries = 0;
        int ret = 0, i;

        if (is_qla8022(ha))
                max_chap_entries = (ha->hw.flt_chap_size / 2) /
                                   sizeof(struct ql4_chap_table);
        else
                max_chap_entries = MAX_CHAP_ENTRIES_40XX;

        ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
                   __func__, *num_entries, chap_tbl_idx);

        if (!buf) {
                ret = -ENOMEM;
                goto exit_get_chap_list;
        }

        chap_rec = (struct iscsi_chap_rec *) buf;
        mutex_lock(&ha->chap_sem);
        for (i = chap_tbl_idx; i < max_chap_entries; i++) {
                chap_table = (struct ql4_chap_table *)ha->chap_list + i;
                if (chap_table->cookie !=
                    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
                        continue;

                chap_rec->chap_tbl_idx = i;
                strncpy(chap_rec->username, chap_table->name,
                        ISCSI_CHAP_AUTH_NAME_MAX_LEN);
                strncpy(chap_rec->password, chap_table->secret,
                        QL4_CHAP_MAX_SECRET_LEN);
                chap_rec->password_length = chap_table->secret_len;

                if (chap_table->flags & BIT_7) /* local */
                        chap_rec->chap_type = CHAP_TYPE_OUT;

                if (chap_table->flags & BIT_6) /* peer */
                        chap_rec->chap_type = CHAP_TYPE_IN;

                chap_rec++;

                valid_chap_entries++;
                if (valid_chap_entries == *num_entries)
                        break;
                else
                        continue;
        }
        mutex_unlock(&ha->chap_sem);

exit_get_chap_list:
        ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
                   __func__, valid_chap_entries);
        *num_entries = valid_chap_entries;
        return ret;
}

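/*
 * __qla4xxx_is_chap_active - iterator callback: returns 1 if the given child
 * device is an iSCSI session whose DDB references the CHAP index passed in
 * via data.  qla4xxx_is_chap_active() walks all children of the Scsi_Host
 * with it to decide whether a CHAP entry is still in use.
 */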
static int __qla4xxx_is_chap_active(struct device *dev, void *data)
{
        int ret = 0;
        uint16_t *chap_tbl_idx = (uint16_t *) data;
        struct iscsi_cls_session *cls_session;
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;

        if (!iscsi_is_session_dev(dev))
                goto exit_is_chap_active;

        cls_session = iscsi_dev_to_session(dev);
        sess = cls_session->dd_data;
        ddb_entry = sess->dd_data;

        if (iscsi_session_chkready(cls_session))
                goto exit_is_chap_active;

        if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
                ret = 1;

exit_is_chap_active:
        return ret;
}

static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
                                  uint16_t chap_tbl_idx)
{
        int ret = 0;

        ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
                                    __qla4xxx_is_chap_active);

        return ret;
}

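/*
 * qla4xxx_delete_chap - invalidate a CHAP entry in flash (and in the driver's
 * cached copy), provided no active session is still using it.
 */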
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
{
        struct scsi_qla_host *ha = to_qla_host(shost);
        struct ql4_chap_table *chap_table;
        dma_addr_t chap_dma;
        int max_chap_entries = 0;
        uint32_t offset = 0;
        uint32_t chap_size;
        int ret = 0;

        chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
        if (chap_table == NULL)
                return -ENOMEM;

        memset(chap_table, 0, sizeof(struct ql4_chap_table));

        if (is_qla8022(ha))
                max_chap_entries = (ha->hw.flt_chap_size / 2) /
                                   sizeof(struct ql4_chap_table);
        else
                max_chap_entries = MAX_CHAP_ENTRIES_40XX;

        if (chap_tbl_idx > max_chap_entries) {
                ret = -EINVAL;
                goto exit_delete_chap;
        }

        /* Check if the chap index is in use.
         * If the chap entry is in use, do not delete it. */
        ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
        if (ret) {
                ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
                           "delete from flash\n", chap_tbl_idx);
                ret = -EBUSY;
                goto exit_delete_chap;
        }

        chap_size = sizeof(struct ql4_chap_table);
        if (is_qla40XX(ha))
                offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
        else {
                offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
                /* flt_chap_size is the CHAP table size for both ports,
                 * so divide it by 2 to calculate the offset for the
                 * second port.
                 */
                if (ha->port_num == 1)
                        offset += (ha->hw.flt_chap_size / 2);
                offset += (chap_tbl_idx * chap_size);
        }

        ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
        if (ret != QLA_SUCCESS) {
                ret = -EINVAL;
                goto exit_delete_chap;
        }

        DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
                          __le16_to_cpu(chap_table->cookie)));

        if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
                ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
                goto exit_delete_chap;
        }

        chap_table->cookie = __constant_cpu_to_le16(0xFFFF);

        offset = FLASH_CHAP_OFFSET |
                 (chap_tbl_idx * sizeof(struct ql4_chap_table));
        ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
                                FLASH_OPT_RMW_COMMIT);
        if (ret == QLA_SUCCESS && ha->chap_list) {
                mutex_lock(&ha->chap_sem);
                /* Update ha chap_list cache */
                memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
                       chap_table, sizeof(struct ql4_chap_table));
                mutex_unlock(&ha->chap_sem);
        }
        if (ret != QLA_SUCCESS)
                ret = -EINVAL;

exit_delete_chap:
        dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
        return ret;
}

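/*
 * qla4xxx_get_iface_param - report network parameters (addresses, VLAN, MTU,
 * port, boot protocol) for an iSCSI interface from the firmware's current
 * IP configuration.
 */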
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
                                   enum iscsi_param_type param_type,
                                   int param, char *buf)
{
        struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
        struct scsi_qla_host *ha = to_qla_host(shost);
        int len = -ENOSYS;

        if (param_type != ISCSI_NET_PARAM)
                return -ENOSYS;

        switch (param) {
        case ISCSI_NET_PARAM_IPV4_ADDR:
                len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
                break;
        case ISCSI_NET_PARAM_IPV4_SUBNET:
                len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
                break;
        case ISCSI_NET_PARAM_IPV4_GW:
                len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
                break;
        case ISCSI_NET_PARAM_IFACE_ENABLE:
                if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
                        len = sprintf(buf, "%s\n",
                                      (ha->ip_config.ipv4_options &
                                       IPOPT_IPV4_PROTOCOL_ENABLE) ?
                                      "enabled" : "disabled");
                else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
                        len = sprintf(buf, "%s\n",
                                      (ha->ip_config.ipv6_options &
                                       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
                                      "enabled" : "disabled");
                break;
        case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
                len = sprintf(buf, "%s\n",
                              (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
                              "dhcp" : "static");
                break;
        case ISCSI_NET_PARAM_IPV6_ADDR:
                if (iface->iface_num == 0)
                        len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
                if (iface->iface_num == 1)
                        len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
                break;
        case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
                len = sprintf(buf, "%pI6\n",
                              &ha->ip_config.ipv6_link_local_addr);
                break;
        case ISCSI_NET_PARAM_IPV6_ROUTER:
                len = sprintf(buf, "%pI6\n",
                              &ha->ip_config.ipv6_default_router_addr);
                break;
        case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
                len = sprintf(buf, "%s\n",
                              (ha->ip_config.ipv6_addl_options &
                               IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
                              "nd" : "static");
                break;
        case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
                len = sprintf(buf, "%s\n",
                              (ha->ip_config.ipv6_addl_options &
                               IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
                              "auto" : "static");
                break;
        case ISCSI_NET_PARAM_VLAN_ID:
                if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
                        len = sprintf(buf, "%d\n",
                                      (ha->ip_config.ipv4_vlan_tag &
                                       ISCSI_MAX_VLAN_ID));
                else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
                        len = sprintf(buf, "%d\n",
                                      (ha->ip_config.ipv6_vlan_tag &
                                       ISCSI_MAX_VLAN_ID));
                break;
        case ISCSI_NET_PARAM_VLAN_PRIORITY:
                if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
                        len = sprintf(buf, "%d\n",
                                      ((ha->ip_config.ipv4_vlan_tag >> 13) &
                                       ISCSI_MAX_VLAN_PRIORITY));
                else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
                        len = sprintf(buf, "%d\n",
                                      ((ha->ip_config.ipv6_vlan_tag >> 13) &
                                       ISCSI_MAX_VLAN_PRIORITY));
                break;
        case ISCSI_NET_PARAM_VLAN_ENABLED:
                if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
                        len = sprintf(buf, "%s\n",
                                      (ha->ip_config.ipv4_options &
                                       IPOPT_VLAN_TAGGING_ENABLE) ?
                                      "enabled" : "disabled");
                else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
                        len = sprintf(buf, "%s\n",
                                      (ha->ip_config.ipv6_options &
                                       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
                                      "enabled" : "disabled");
                break;
        case ISCSI_NET_PARAM_MTU:
                len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
                break;
        case ISCSI_NET_PARAM_PORT:
                if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
                        len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
                else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
                        len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
                break;
        default:
                len = -ENOSYS;
        }

        return len;
}

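/*
 * qla4xxx_ep_connect - allocate an iSCSI endpoint for the given destination
 * address.  Only the destination address and the owning host are recorded
 * here; no connection is opened at this point.
 */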
static struct iscsi_endpoint *
qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                   int non_blocking)
{
        int ret;
        struct iscsi_endpoint *ep;
        struct qla_endpoint *qla_ep;
        struct scsi_qla_host *ha;
        struct sockaddr_in *addr;
        struct sockaddr_in6 *addr6;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        if (!shost) {
                ret = -ENXIO;
                printk(KERN_ERR "%s: shost is NULL\n",
                       __func__);
                return ERR_PTR(ret);
        }

        ha = iscsi_host_priv(shost);

        ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
        if (!ep) {
                ret = -ENOMEM;
                return ERR_PTR(ret);
        }

        qla_ep = ep->dd_data;
        memset(qla_ep, 0, sizeof(struct qla_endpoint));
        if (dst_addr->sa_family == AF_INET) {
                memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
                addr = (struct sockaddr_in *)&qla_ep->dst_addr;
                DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
                                  (char *)&addr->sin_addr));
        } else if (dst_addr->sa_family == AF_INET6) {
                memcpy(&qla_ep->dst_addr, dst_addr,
                       sizeof(struct sockaddr_in6));
                addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
                DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
                                  (char *)&addr6->sin6_addr));
        }

        qla_ep->host = shost;

        return ep;
}

static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
        struct qla_endpoint *qla_ep;
        struct scsi_qla_host *ha;
        int ret = 0;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        qla_ep = ep->dd_data;
        ha = to_qla_host(qla_ep->host);

        if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
                ret = 1;

        return ret;
}

static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        iscsi_destroy_endpoint(ep);
}

static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
                                enum iscsi_param param,
                                char *buf)
{
        struct qla_endpoint *qla_ep = ep->dd_data;
        struct sockaddr *dst_addr;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

        switch (param) {
        case ISCSI_PARAM_CONN_PORT:
        case ISCSI_PARAM_CONN_ADDRESS:
                if (!qla_ep)
                        return -ENOTCONN;

                dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
                if (!dst_addr)
                        return -ENOTCONN;

                return iscsi_conn_get_addr_param((struct sockaddr_storage *)
                                                 &qla_ep->dst_addr, param, buf);
        default:
                return -ENOSYS;
        }
}

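/*
 * qla4xxx_conn_get_stats - fetch per-connection iSCSI statistics from the
 * firmware via a DMA buffer and translate them into the iscsi_stats format
 * expected by the transport class.
 */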
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
                                   struct iscsi_stats *stats)
{
        struct iscsi_session *sess;
        struct iscsi_cls_session *cls_sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
        struct ql_iscsi_stats *ql_iscsi_stats;
        int stats_size;
        int ret;
        dma_addr_t iscsi_stats_dma;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

        cls_sess = iscsi_conn_to_session(cls_conn);
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;

        stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
        /* Allocate memory */
        ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
                                            &iscsi_stats_dma, GFP_KERNEL);
        if (!ql_iscsi_stats) {
                ql4_printk(KERN_ERR, ha,
                           "Unable to allocate memory for iscsi stats\n");
                goto exit_get_stats;
        }

        ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
                                    iscsi_stats_dma);
        if (ret != QLA_SUCCESS) {
                ql4_printk(KERN_ERR, ha,
                           "Unable to retrieve iscsi stats\n");
                goto free_stats;
        }

        /* octets */
        stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
        stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
        /* xmit pdus */
        stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
        stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
        stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
        stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
        stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
        stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
        stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
        stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
        /* recv pdus */
        stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
        stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
        stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
        stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
        stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
        stats->logoutrsp_pdus =
                        le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
        stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
        stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
        stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);

free_stats:
        dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
                          iscsi_stats_dma);
exit_get_stats:
        return;
}

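/*
 * qla4xxx_eh_cmd_timed_out - SCSI midlayer timeout hook: if the session is
 * already marked failed, ask the midlayer to reset the timer instead of
 * escalating to error recovery.
 */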
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
        struct iscsi_cls_session *session;
        struct iscsi_session *sess;
        unsigned long flags;
        enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;

        session = starget_to_session(scsi_target(sc->device));
        sess = session->dd_data;

        spin_lock_irqsave(&session->lock, flags);
        if (session->state == ISCSI_SESSION_FAILED)
                ret = BLK_EH_RESET_TIMER;
        spin_unlock_irqrestore(&session->lock, flags);

        return ret;
}

static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
{
        struct scsi_qla_host *ha = to_qla_host(shost);
        struct iscsi_cls_host *ihost = shost->shost_data;
        uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;

        qla4xxx_get_firmware_state(ha);

        switch (ha->addl_fw_state & 0x0F00) {
        case FW_ADDSTATE_LINK_SPEED_10MBPS:
                speed = ISCSI_PORT_SPEED_10MBPS;
                break;
        case FW_ADDSTATE_LINK_SPEED_100MBPS:
                speed = ISCSI_PORT_SPEED_100MBPS;
                break;
        case FW_ADDSTATE_LINK_SPEED_1GBPS:
                speed = ISCSI_PORT_SPEED_1GBPS;
                break;
        case FW_ADDSTATE_LINK_SPEED_10GBPS:
                speed = ISCSI_PORT_SPEED_10GBPS;
                break;
        }
        ihost->port_speed = speed;
}

static void qla4xxx_set_port_state(struct Scsi_Host *shost)
{
        struct scsi_qla_host *ha = to_qla_host(shost);
        struct iscsi_cls_host *ihost = shost->shost_data;
        uint32_t state = ISCSI_PORT_STATE_DOWN;

        if (test_bit(AF_LINK_UP, &ha->flags))
                state = ISCSI_PORT_STATE_UP;

        ihost->port_state = state;
}

static int qla4xxx_host_get_param(struct Scsi_Host *shost,
                                  enum iscsi_host_param param, char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(shost);
        int len;

        switch (param) {
        case ISCSI_HOST_PARAM_HWADDRESS:
                len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
                break;
        case ISCSI_HOST_PARAM_IPADDRESS:
                len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
                break;
        case ISCSI_HOST_PARAM_INITIATOR_NAME:
                len = sprintf(buf, "%s\n", ha->name_string);
                break;
        case ISCSI_HOST_PARAM_PORT_STATE:
                qla4xxx_set_port_state(shost);
                len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
                break;
        case ISCSI_HOST_PARAM_PORT_SPEED:
                qla4xxx_set_port_speed(shost);
                len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
                break;
        default:
                return -ENOSYS;
        }

        return len;
}

static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
{
        if (ha->iface_ipv4)
                return;

        /* IPv4 */
        ha->iface_ipv4 = iscsi_create_iface(ha->host,
                                            &qla4xxx_iscsi_transport,
                                            ISCSI_IFACE_TYPE_IPV4, 0, 0);
        if (!ha->iface_ipv4)
                ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
                           "iface0.\n");
}

static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
{
        if (!ha->iface_ipv6_0)
                /* IPv6 iface-0 */
                ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
                                                      &qla4xxx_iscsi_transport,
                                                      ISCSI_IFACE_TYPE_IPV6, 0,
                                                      0);
        if (!ha->iface_ipv6_0)
                ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
                           "iface0.\n");

        if (!ha->iface_ipv6_1)
                /* IPv6 iface-1 */
                ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
                                                      &qla4xxx_iscsi_transport,
                                                      ISCSI_IFACE_TYPE_IPV6, 1,
                                                      0);
        if (!ha->iface_ipv6_1)
                ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
                           "iface1.\n");
}

static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
{
        if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
                qla4xxx_create_ipv4_iface(ha);

        if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
                qla4xxx_create_ipv6_iface(ha);
}

static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
{
        if (ha->iface_ipv4) {
                iscsi_destroy_iface(ha->iface_ipv4);
                ha->iface_ipv4 = NULL;
        }
}

static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
{
        if (ha->iface_ipv6_0) {
                iscsi_destroy_iface(ha->iface_ipv6_0);
                ha->iface_ipv6_0 = NULL;
        }
        if (ha->iface_ipv6_1) {
                iscsi_destroy_iface(ha->iface_ipv6_1);
                ha->iface_ipv6_1 = NULL;
        }
}

static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
{
        qla4xxx_destroy_ipv4_iface(ha);
        qla4xxx_destroy_ipv6_iface(ha);
}

static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
                             struct iscsi_iface_param_info *iface_param,
                             struct addr_ctrl_blk *init_fw_cb)
{
        /*
         * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
         * iface_num 1 is valid only for IPv6 Addr.
         */
        switch (iface_param->param) {
        case ISCSI_NET_PARAM_IPV6_ADDR:
                if (iface_param->iface_num & 0x1)
                        /* IPv6 Addr 1 */
                        memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
                               sizeof(init_fw_cb->ipv6_addr1));
                else
                        /* IPv6 Addr 0 */
                        memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
                               sizeof(init_fw_cb->ipv6_addr0));
                break;
        case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
                if (iface_param->iface_num & 0x1)
                        break;
                memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
                       sizeof(init_fw_cb->ipv6_if_id));
                break;
        case ISCSI_NET_PARAM_IPV6_ROUTER:
                if (iface_param->iface_num & 0x1)
                        break;
                memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
                       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
                break;
        case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
                /* Autocfg applies to even interface */
                if (iface_param->iface_num & 0x1)
                        break;

                if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
                        init_fw_cb->ipv6_addtl_opts &=
                                cpu_to_le16(
                                  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
                else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
                        init_fw_cb->ipv6_addtl_opts |=
                                cpu_to_le16(
                                   IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
                else
                        ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
                                   "IPv6 addr\n");
                break;
        case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
                /* Autocfg applies to even interface */
                if (iface_param->iface_num & 0x1)
                        break;

                if (iface_param->value[0] ==
                    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
                        init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
                                    IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
                else if (iface_param->value[0] ==
                         ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
                        init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
                                    ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
                else
                        ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
                                   "IPv6 linklocal addr\n");
                break;
        case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
                /* Autocfg applies to even interface */
                if (iface_param->iface_num & 0x1)
                        break;

                if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
                        memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
                               sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
                break;
        case ISCSI_NET_PARAM_IFACE_ENABLE:
                if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
                        init_fw_cb->ipv6_opts |=
                                cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
                        qla4xxx_create_ipv6_iface(ha);
                } else {
                        init_fw_cb->ipv6_opts &=
                                cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
                                            0xFFFF);
                        qla4xxx_destroy_ipv6_iface(ha);
                }
                break;
        case ISCSI_NET_PARAM_VLAN_TAG:
                if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
                        break;
                init_fw_cb->ipv6_vlan_tag =
                                cpu_to_be16(*(uint16_t *)iface_param->value);
                break;
        case ISCSI_NET_PARAM_VLAN_ENABLED:
                if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
                        init_fw_cb->ipv6_opts |=
                                cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
                else
                        init_fw_cb->ipv6_opts &=
                                cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
                break;
        case ISCSI_NET_PARAM_MTU:
                init_fw_cb->eth_mtu_size =
                                cpu_to_le16(*(uint16_t *)iface_param->value);
                break;
        case ISCSI_NET_PARAM_PORT:
                /* Autocfg applies to even interface */
                if (iface_param->iface_num & 0x1)
                        break;

                init_fw_cb->ipv6_port =
                                cpu_to_le16(*(uint16_t *)iface_param->value);
                break;
        default:
                ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
                           iface_param->param);
                break;
        }
}

static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
                             struct iscsi_iface_param_info *iface_param,
                             struct addr_ctrl_blk *init_fw_cb)
{
        switch (iface_param->param) {
        case ISCSI_NET_PARAM_IPV4_ADDR:
                memcpy(init_fw_cb->ipv4_addr, iface_param->value,
                       sizeof(init_fw_cb->ipv4_addr));
                break;
        case ISCSI_NET_PARAM_IPV4_SUBNET:
                memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
                       sizeof(init_fw_cb->ipv4_subnet));
                break;
        case ISCSI_NET_PARAM_IPV4_GW:
                memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
                       sizeof(init_fw_cb->ipv4_gw_addr));
                break;
        case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
                if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
                        init_fw_cb->ipv4_tcp_opts |=
                                cpu_to_le16(TCPOPT_DHCP_ENABLE);
                else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
                        init_fw_cb->ipv4_tcp_opts &=
                                cpu_to_le16(~TCPOPT_DHCP_ENABLE);
                else
                        ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
                break;
        case ISCSI_NET_PARAM_IFACE_ENABLE:
                if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
                        init_fw_cb->ipv4_ip_opts |=
                                cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
                        qla4xxx_create_ipv4_iface(ha);
                } else {
                        init_fw_cb->ipv4_ip_opts &=
                                cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
                                            0xFFFF);
                        qla4xxx_destroy_ipv4_iface(ha);
                }
                break;
        case ISCSI_NET_PARAM_VLAN_TAG:
                if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
                        break;
                init_fw_cb->ipv4_vlan_tag =
                                cpu_to_be16(*(uint16_t *)iface_param->value);
                break;
        case ISCSI_NET_PARAM_VLAN_ENABLED:
                if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
                        init_fw_cb->ipv4_ip_opts |=
                                cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
                else
                        init_fw_cb->ipv4_ip_opts &=
                                cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
                break;
        case ISCSI_NET_PARAM_MTU:
                init_fw_cb->eth_mtu_size =
                                cpu_to_le16(*(uint16_t *)iface_param->value);
                break;
        case ISCSI_NET_PARAM_PORT:
                init_fw_cb->ipv4_port =
                                cpu_to_le16(*(uint16_t *)iface_param->value);
                break;
        default:
                ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
                           iface_param->param);
                break;
        }
}

static void
qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
{
        struct addr_ctrl_blk_def *acb;
        acb = (struct addr_ctrl_blk_def *)init_fw_cb;
        memset(acb->reserved1, 0, sizeof(acb->reserved1));
        memset(acb->reserved2, 0, sizeof(acb->reserved2));
        memset(acb->reserved3, 0, sizeof(acb->reserved3));
        memset(acb->reserved4, 0, sizeof(acb->reserved4));
        memset(acb->reserved5, 0, sizeof(acb->reserved5));
        memset(acb->reserved6, 0, sizeof(acb->reserved6));
        memset(acb->reserved7, 0, sizeof(acb->reserved7));
        memset(acb->reserved8, 0, sizeof(acb->reserved8));
        memset(acb->reserved9, 0, sizeof(acb->reserved9));
        memset(acb->reserved10, 0, sizeof(acb->reserved10));
        memset(acb->reserved11, 0, sizeof(acb->reserved11));
        memset(acb->reserved12, 0, sizeof(acb->reserved12));
        memset(acb->reserved13, 0, sizeof(acb->reserved13));
        memset(acb->reserved14, 0, sizeof(acb->reserved14));
        memset(acb->reserved15, 0, sizeof(acb->reserved15));
}

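/*
 * qla4xxx_iface_set_param - apply a set of network parameter changes sent
 * from user space.  The current init firmware control block is read, each
 * nlattr-encoded parameter is folded into it, and the result is committed
 * to flash and re-applied to the firmware (disable ACB, set ACB, then
 * refresh the local copy).
 */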
static int
qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
{
        struct scsi_qla_host *ha = to_qla_host(shost);
        int rval = 0;
        struct iscsi_iface_param_info *iface_param = NULL;
        struct addr_ctrl_blk *init_fw_cb = NULL;
        dma_addr_t init_fw_cb_dma;
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
        uint32_t rem = len;
        struct nlattr *attr;

        init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
                                        sizeof(struct addr_ctrl_blk),
                                        &init_fw_cb_dma, GFP_KERNEL);
        if (!init_fw_cb) {
                ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
                           __func__);
                return -ENOMEM;
        }

        memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
        memset(&mbox_cmd, 0, sizeof(mbox_cmd));
        memset(&mbox_sts, 0, sizeof(mbox_sts));

        if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
                ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
                rval = -EIO;
                goto exit_init_fw_cb;
        }

        nla_for_each_attr(attr, data, len, rem) {
                iface_param = nla_data(attr);

                if (iface_param->param_type != ISCSI_NET_PARAM)
                        continue;

                switch (iface_param->iface_type) {
                case ISCSI_IFACE_TYPE_IPV4:
                        switch (iface_param->iface_num) {
                        case 0:
                                qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
                                break;
                        default:
                                /* Cannot have more than one IPv4 interface */
                                ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
                                           "number = %d\n",
                                           iface_param->iface_num);
                                break;
                        }
                        break;
                case ISCSI_IFACE_TYPE_IPV6:
                        switch (iface_param->iface_num) {
                        case 0:
                        case 1:
                                qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
                                break;
                        default:
                                /* Cannot have more than two IPv6 interfaces */
                                ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
                                           "number = %d\n",
                                           iface_param->iface_num);
                                break;
                        }
                        break;
                default:
                        ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
                        break;
                }
        }

        init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);

        rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
                                 sizeof(struct addr_ctrl_blk),
                                 FLASH_OPT_RMW_COMMIT);
        if (rval != QLA_SUCCESS) {
                ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
                           __func__);
                rval = -EIO;
                goto exit_init_fw_cb;
        }

        rval = qla4xxx_disable_acb(ha);
        if (rval != QLA_SUCCESS) {
                ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
                           __func__);
                rval = -EIO;
                goto exit_init_fw_cb;
        }

        wait_for_completion_timeout(&ha->disable_acb_comp,
                                    DISABLE_ACB_TOV * HZ);

        qla4xxx_initcb_to_acb(init_fw_cb);

        rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
        if (rval != QLA_SUCCESS) {
                ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
                           __func__);
                rval = -EIO;
                goto exit_init_fw_cb;
        }

        memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
        qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
                                  init_fw_cb_dma);

exit_init_fw_cb:
        dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
                          init_fw_cb, init_fw_cb_dma);

        return rval;
}

static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
                                     enum iscsi_param param, char *buf)
{
        struct iscsi_session *sess = cls_sess->dd_data;
        struct ddb_entry *ddb_entry = sess->dd_data;
        struct scsi_qla_host *ha = ddb_entry->ha;
        int rval, len;
        uint16_t idx;

        switch (param) {
        case ISCSI_PARAM_CHAP_IN_IDX:
                rval = qla4xxx_get_chap_index(ha, sess->username_in,
                                              sess->password_in, BIDI_CHAP,
                                              &idx);
                if (rval)
                        return -EINVAL;

                len = sprintf(buf, "%hu\n", idx);
                break;
        case ISCSI_PARAM_CHAP_OUT_IDX:
                rval = qla4xxx_get_chap_index(ha, sess->username,
                                              sess->password, LOCAL_CHAP,
                                              &idx);
                if (rval)
                        return -EINVAL;

                len = sprintf(buf, "%hu\n", idx);
                break;
        default:
                return iscsi_session_get_param(cls_sess, param, buf);
        }

        return len;
}

static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
                                  enum iscsi_param param, char *buf)
{
        struct iscsi_conn *conn;
        struct qla_conn *qla_conn;
        struct sockaddr *dst_addr;
        int len = 0;

        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
        dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;

        switch (param) {
        case ISCSI_PARAM_CONN_PORT:
        case ISCSI_PARAM_CONN_ADDRESS:
                return iscsi_conn_get_addr_param((struct sockaddr_storage *)
                                                 dst_addr, param, buf);
        default:
                return iscsi_conn_get_param(cls_conn, param, buf);
        }

        return len;
}

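/*
 * qla4xxx_get_ddb_index - find a free device database (DDB) index, reserve
 * it in the driver's bitmap and request it from the firmware, retrying with
 * the next free index if the firmware rejects it.
 */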
int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
{
        uint32_t mbx_sts = 0;
        uint16_t tmp_ddb_index;
        int ret;

get_ddb_index:
        tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);

        if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
                DEBUG2(ql4_printk(KERN_INFO, ha,
                                  "Free DDB index not available\n"));
                ret = QLA_ERROR;
                goto exit_get_ddb_index;
        }

        if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
                goto get_ddb_index;

        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "Found a free DDB index at %d\n", tmp_ddb_index));
        ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
        if (ret == QLA_ERROR) {
                if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
                        ql4_printk(KERN_INFO, ha,
                                   "DDB index = %d not available trying next\n",
                                   tmp_ddb_index);
                        goto get_ddb_index;
                }
                DEBUG2(ql4_printk(KERN_INFO, ha,
                                  "Free FW DDB not available\n"));
        }

        *ddb_index = tmp_ddb_index;

exit_get_ddb_index:
        return ret;
}

static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
                                   struct ddb_entry *ddb_entry,
                                   char *existing_ipaddr,
                                   char *user_ipaddr)
{
        uint8_t dst_ipaddr[IPv6_ADDR_LEN];
        char formatted_ipaddr[DDB_IPADDR_LEN];
        int status = QLA_SUCCESS, ret = 0;

        if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
                ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
                               '\0', NULL);
                if (ret == 0) {
                        status = QLA_ERROR;
                        goto out_match;
                }
                ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
        } else {
                ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
                               '\0', NULL);
                if (ret == 0) {
                        status = QLA_ERROR;
                        goto out_match;
                }
                ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
        }

        if (strcmp(existing_ipaddr, formatted_ipaddr))
                status = QLA_ERROR;

out_match:
        return status;
}

static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
                                      struct iscsi_cls_conn *cls_conn)
{
        int idx = 0, max_ddbs, rval;
        struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
        struct iscsi_session *sess, *existing_sess;
        struct iscsi_conn *conn, *existing_conn;
        struct ddb_entry *ddb_entry;

        sess = cls_sess->dd_data;
        conn = cls_conn->dd_data;

        if (sess->targetname == NULL ||
            conn->persistent_address == NULL ||
            conn->persistent_port == 0)
                return QLA_ERROR;

        max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
                                    MAX_DEV_DB_ENTRIES;

        for (idx = 0; idx < max_ddbs; idx++) {
                ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
                if (ddb_entry == NULL)
                        continue;

                if (ddb_entry->ddb_type != FLASH_DDB)
                        continue;

                existing_sess = ddb_entry->sess->dd_data;
                existing_conn = ddb_entry->conn->dd_data;

                if (existing_sess->targetname == NULL ||
                    existing_conn->persistent_address == NULL ||
                    existing_conn->persistent_port == 0)
                        continue;

                DEBUG2(ql4_printk(KERN_INFO, ha,
                                  "IQN = %s User IQN = %s\n",
                                  existing_sess->targetname,
                                  sess->targetname));

                DEBUG2(ql4_printk(KERN_INFO, ha,
                                  "IP = %s User IP = %s\n",
                                  existing_conn->persistent_address,
                                  conn->persistent_address));

                DEBUG2(ql4_printk(KERN_INFO, ha,
                                  "Port = %d User Port = %d\n",
                                  existing_conn->persistent_port,
                                  conn->persistent_port));

                if (strcmp(existing_sess->targetname, sess->targetname))
                        continue;
                rval = qla4xxx_match_ipaddress(ha, ddb_entry,
                                        existing_conn->persistent_address,
                                        conn->persistent_address);
                if (rval == QLA_ERROR)
                        continue;
                if (existing_conn->persistent_port != conn->persistent_port)
                        continue;
                break;
        }

        if (idx == max_ddbs)
                return QLA_ERROR;

        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "Match found in fwdb sessions\n"));
        return QLA_SUCCESS;
}

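/*
 * qla4xxx_session_create - transport hook for session creation: reserve a
 * firmware DDB index, set up the iSCSI session and initialize the driver's
 * ddb_entry bookkeeping for it.
 */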
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
                       uint16_t cmds_max, uint16_t qdepth,
                       uint32_t initial_cmdsn)
{
        struct iscsi_cls_session *cls_sess;
        struct scsi_qla_host *ha;
        struct qla_endpoint *qla_ep;
        struct ddb_entry *ddb_entry;
        uint16_t ddb_index;
        struct iscsi_session *sess;
        struct sockaddr *dst_addr;
        int ret;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        if (!ep) {
                printk(KERN_ERR "qla4xxx: missing ep.\n");
                return NULL;
        }

        qla_ep = ep->dd_data;
        dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
        ha = to_qla_host(qla_ep->host);

        ret = qla4xxx_get_ddb_index(ha, &ddb_index);
        if (ret == QLA_ERROR)
                return NULL;

        cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
                                       cmds_max, sizeof(struct ddb_entry),
                                       sizeof(struct ql4_task_data),
                                       initial_cmdsn, ddb_index);
        if (!cls_sess)
                return NULL;

        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->fw_ddb_index = ddb_index;
        ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
        ddb_entry->ha = ha;
        ddb_entry->sess = cls_sess;
        ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
        ddb_entry->ddb_change = qla4xxx_ddb_change;
        cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
        ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
        ha->tot_ddbs++;

        return cls_sess;
}

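/*
 * qla4xxx_session_destroy - transport hook for session teardown: wait (up to
 * LOGOUT_TOV) for the firmware DDB to leave the active state, then clear the
 * DDB, free the driver's ddb_entry and tear down the iSCSI session.
 */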
static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
{
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
        unsigned long flags, wtime;
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint32_t ddb_state;
        int ret;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;

        fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                          &fw_ddb_entry_dma, GFP_KERNEL);
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
                goto destroy_session;
        }

        wtime = jiffies + (HZ * LOGOUT_TOV);
        do {
                ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
                                              fw_ddb_entry, fw_ddb_entry_dma,
                                              NULL, NULL, &ddb_state, NULL,
                                              NULL, NULL);
                if (ret == QLA_ERROR)
                        goto destroy_session;

                if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
                    (ddb_state == DDB_DS_SESSION_FAILED))
                        goto destroy_session;

                schedule_timeout_uninterruptible(HZ);
        } while ((time_after(wtime, jiffies)));

destroy_session:
        qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);

        spin_lock_irqsave(&ha->hardware_lock, flags);
        qla4xxx_free_ddb(ha, ddb_entry);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        iscsi_session_teardown(cls_sess);

        if (fw_ddb_entry)
                dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                  fw_ddb_entry, fw_ddb_entry_dma);
}

static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
{
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
                                    conn_idx);
        if (!cls_conn)
                return NULL;

        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->conn = cls_conn;

        return cls_conn;
}

static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
                             struct iscsi_cls_conn *cls_conn,
                             uint64_t transport_fd, int is_leading)
{
        struct iscsi_conn *conn;
        struct qla_conn *qla_conn;
        struct iscsi_endpoint *ep;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

        if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
                return -EINVAL;
        ep = iscsi_lookup_endpoint(transport_fd);
        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
        qla_conn->qla_ep = ep->dd_data;
        return 0;
}

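/*
 * qla4xxx_conn_start - transport hook to start a connection: skip the login
 * if a matching session already exists in the firmware, otherwise program
 * the DDB with the connection parameters and ask the firmware to open the
 * connection.
 */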
static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
        struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint32_t mbx_sts = 0;
        int ret = 0;
        int status = QLA_SUCCESS;

        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;

        /* If we already have a matching FW DDB, do not log in to this
         * target again; doing so could cause the target to log out the
         * previous connection.
         */
        ret = qla4xxx_match_fwdb_session(ha, cls_conn);
        if (ret == QLA_SUCCESS) {
                ql4_printk(KERN_INFO, ha,
                           "Session already exists in FW.\n");
                ret = -EEXIST;
                goto exit_conn_start;
        }

        fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                          &fw_ddb_entry_dma, GFP_KERNEL);
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
                ret = -ENOMEM;
                goto exit_conn_start;
        }

        ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
        if (ret) {
                /* If iscsid is stopped and started, there is no need to set
                 * the parameters again, since the DDB state will already be
                 * active and the FW does not allow a set-ddb on an active
                 * session.
                 */
                if (mbx_sts)
                        if (ddb_entry->fw_ddb_device_state ==
                            DDB_DS_SESSION_ACTIVE) {
                                ddb_entry->unblock_sess(ddb_entry->sess);
                                goto exit_set_param;
                        }

                ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
                           __func__, ddb_entry->fw_ddb_index);
                goto exit_conn_start;
        }

        status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
        if (status == QLA_ERROR) {
                ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
                           sess->targetname);
                ret = -EINVAL;
                goto exit_conn_start;
        }

        if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
                ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;

        DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
                      ddb_entry->fw_ddb_device_state));

exit_set_param:
        ret = 0;

exit_conn_start:
        if (fw_ddb_entry)
                dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                  fw_ddb_entry, fw_ddb_entry_dma);
        return ret;
}

1752static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1753{
1754 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1755 struct iscsi_session *sess;
1756 struct scsi_qla_host *ha;
1757 struct ddb_entry *ddb_entry;
1758 int options;
1759
1760 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1761 sess = cls_sess->dd_data;
1762 ddb_entry = sess->dd_data;
1763 ha = ddb_entry->ha;
1764
1765 options = LOGOUT_OPTION_CLOSE_SESSION;
1766 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1767 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
b3a271a9
MR
1768}
1769
1770static void qla4xxx_task_work(struct work_struct *wdata)
1771{
1772 struct ql4_task_data *task_data;
1773 struct scsi_qla_host *ha;
1774 struct passthru_status *sts;
1775 struct iscsi_task *task;
1776 struct iscsi_hdr *hdr;
1777 uint8_t *data;
1778 uint32_t data_len;
1779 struct iscsi_conn *conn;
1780 int hdr_len;
1781 itt_t itt;
1782
1783 task_data = container_of(wdata, struct ql4_task_data, task_work);
1784 ha = task_data->ha;
1785 task = task_data->task;
1786 sts = &task_data->sts;
1787 hdr_len = sizeof(struct iscsi_hdr);
1788
1789 DEBUG3(printk(KERN_INFO "Status returned\n"));
1790 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1791 DEBUG3(printk(KERN_INFO "Response buffer"));
1792 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1793
1794 conn = task->conn;
1795
1796 switch (sts->completionStatus) {
1797 case PASSTHRU_STATUS_COMPLETE:
1798 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1799 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1800 itt = sts->handle;
1801 hdr->itt = itt;
1802 data = task_data->resp_buffer + hdr_len;
1803 data_len = task_data->resp_len - hdr_len;
1804 iscsi_complete_pdu(conn, hdr, data, data_len);
1805 break;
1806 default:
1807 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1808 sts->completionStatus);
1809 break;
1810 }
1811 return;
1812}
1813
1814static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1815{
1816 struct ql4_task_data *task_data;
1817 struct iscsi_session *sess;
1818 struct ddb_entry *ddb_entry;
1819 struct scsi_qla_host *ha;
1820 int hdr_len;
1821
1822 sess = task->conn->session;
1823 ddb_entry = sess->dd_data;
1824 ha = ddb_entry->ha;
1825 task_data = task->dd_data;
1826 memset(task_data, 0, sizeof(struct ql4_task_data));
1827
1828 if (task->sc) {
1829 ql4_printk(KERN_INFO, ha,
1830 "%s: SCSI Commands not implemented\n", __func__);
1831 return -EINVAL;
1832 }
1833
1834 hdr_len = sizeof(struct iscsi_hdr);
1835 task_data->ha = ha;
1836 task_data->task = task;
1837
1838 if (task->data_count) {
1839 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1840 task->data_count,
1841 PCI_DMA_TODEVICE);
1842 }
1843
1844	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1845 __func__, task->conn->max_recv_dlength, hdr_len));
1846
69ca216e 1847 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
b3a271a9
MR
1848 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1849 task_data->resp_len,
1850 &task_data->resp_dma,
1851 GFP_ATOMIC);
1852 if (!task_data->resp_buffer)
1853 goto exit_alloc_pdu;
1854
69ca216e 1855 task_data->req_len = task->data_count + hdr_len;
b3a271a9 1856 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
69ca216e 1857 task_data->req_len,
1858 &task_data->req_dma,
1859 GFP_ATOMIC);
1860 if (!task_data->req_buffer)
1861 goto exit_alloc_pdu;
1862
1863 task->hdr = task_data->req_buffer;
1864
1865 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1866
1867 return 0;
1868
1869exit_alloc_pdu:
1870 if (task_data->resp_buffer)
1871 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1872 task_data->resp_buffer, task_data->resp_dma);
1873
1874 if (task_data->req_buffer)
69ca216e 1875 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1876 task_data->req_buffer, task_data->req_dma);
1877 return -ENOMEM;
1878}
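/*
 * What qla4xxx_alloc_pdu() leaves behind for the passthru path, in
 * brief: resp_buffer and req_buffer are coherent DMA buffers sized for
 * one iscsi_hdr plus the connection's max_recv_dlength (response) or
 * task->data_count (request), and task->hdr points into req_buffer so
 * that libiscsi builds the PDU header directly in DMA-able memory.
 */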
1879
1880static void qla4xxx_task_cleanup(struct iscsi_task *task)
1881{
1882 struct ql4_task_data *task_data;
1883 struct iscsi_session *sess;
1884 struct ddb_entry *ddb_entry;
1885 struct scsi_qla_host *ha;
1886 int hdr_len;
1887
1888 hdr_len = sizeof(struct iscsi_hdr);
1889 sess = task->conn->session;
1890 ddb_entry = sess->dd_data;
1891 ha = ddb_entry->ha;
1892 task_data = task->dd_data;
1893
1894 if (task->data_count) {
1895 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1896 task->data_count, PCI_DMA_TODEVICE);
1897 }
1898
1899	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1900 __func__, task->conn->max_recv_dlength, hdr_len));
1901
1902 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1903 task_data->resp_buffer, task_data->resp_dma);
69ca216e 1904 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1905 task_data->req_buffer, task_data->req_dma);
1906 return;
1907}
1908
1909static int qla4xxx_task_xmit(struct iscsi_task *task)
1910{
1911 struct scsi_cmnd *sc = task->sc;
1912 struct iscsi_session *sess = task->conn->session;
1913 struct ddb_entry *ddb_entry = sess->dd_data;
1914 struct scsi_qla_host *ha = ddb_entry->ha;
1915
1916 if (!sc)
1917 return qla4xxx_send_passthru0(task);
1918
1919 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1920 __func__);
1921 return -ENOSYS;
1922}
1923
13483730
MC
1924static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1925 struct dev_db_entry *fw_ddb_entry,
1926 struct iscsi_cls_session *cls_sess,
1927 struct iscsi_cls_conn *cls_conn)
1928{
1929 int buflen = 0;
1930 struct iscsi_session *sess;
376738af 1931 struct ddb_entry *ddb_entry;
13483730
MC
1932 struct iscsi_conn *conn;
1933 char ip_addr[DDB_IPADDR_LEN];
1934 uint16_t options = 0;
1935
1936 sess = cls_sess->dd_data;
376738af 1937 ddb_entry = sess->dd_data;
13483730
MC
1938 conn = cls_conn->dd_data;
1939
376738af
NJ
1940 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
1941
13483730
MC
1942 conn->max_recv_dlength = BYTE_UNITS *
1943 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1944
1945 conn->max_xmit_dlength = BYTE_UNITS *
1946 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1947
1948 sess->initial_r2t_en =
1949 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1950
1951 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1952
1953 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1954
1955 sess->first_burst = BYTE_UNITS *
1956 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1957
1958 sess->max_burst = BYTE_UNITS *
1959 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1960
1961 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1962
1963 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1964
1965 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
1966
1967 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1968
1969 options = le16_to_cpu(fw_ddb_entry->options);
1970 if (options & DDB_OPT_IPV6_DEVICE)
1971 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
1972 else
1973 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
1974
1975 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
1976 (char *)fw_ddb_entry->iscsi_name, buflen);
1977 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
1978 (char *)ha->name_string, buflen);
1979 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
1980 (char *)ip_addr, buflen);
6c1b8789
VC
1981 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
1982 (char *)fw_ddb_entry->iscsi_alias, buflen);
13483730
MC
1983}
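/*
 * A note on units in the copy above: the data segment and burst length
 * fields of the firmware DDB appear to be stored in BYTE_UNITS-sized
 * chunks, so they are scaled by BYTE_UNITS before being handed to the
 * iSCSI transport, while the timer values (time2wait/time2retain) and
 * the port/tpgt fields are used as-is after endian conversion.
 */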
1984
1985void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
1986 struct ddb_entry *ddb_entry)
1987{
1988 struct iscsi_cls_session *cls_sess;
1989 struct iscsi_cls_conn *cls_conn;
1990 uint32_t ddb_state;
1991 dma_addr_t fw_ddb_entry_dma;
1992 struct dev_db_entry *fw_ddb_entry;
1993
1994 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1995 &fw_ddb_entry_dma, GFP_KERNEL);
1996 if (!fw_ddb_entry) {
1997 ql4_printk(KERN_ERR, ha,
1998 "%s: Unable to allocate dma buffer\n", __func__);
1999 goto exit_session_conn_fwddb_param;
2000 }
2001
2002 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2003 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2004 NULL, NULL, NULL) == QLA_ERROR) {
2005 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2006 "get_ddb_entry for fw_ddb_index %d\n",
2007 ha->host_no, __func__,
2008 ddb_entry->fw_ddb_index));
2009 goto exit_session_conn_fwddb_param;
2010 }
2011
2012 cls_sess = ddb_entry->sess;
2013
2014 cls_conn = ddb_entry->conn;
2015
2016 /* Update params */
2017 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2018
2019exit_session_conn_fwddb_param:
2020 if (fw_ddb_entry)
2021 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2022 fw_ddb_entry, fw_ddb_entry_dma);
2023}
2024
b3a271a9
MR
2025void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2026 struct ddb_entry *ddb_entry)
2027{
2028 struct iscsi_cls_session *cls_sess;
2029 struct iscsi_cls_conn *cls_conn;
2030 struct iscsi_session *sess;
2031 struct iscsi_conn *conn;
2032 uint32_t ddb_state;
2033 dma_addr_t fw_ddb_entry_dma;
2034 struct dev_db_entry *fw_ddb_entry;
2035
2036 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2037 &fw_ddb_entry_dma, GFP_KERNEL);
2038 if (!fw_ddb_entry) {
2039 ql4_printk(KERN_ERR, ha,
2040 "%s: Unable to allocate dma buffer\n", __func__);
13483730 2041 goto exit_session_conn_param;
b3a271a9
MR
2042 }
2043
2044 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2045 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2046 NULL, NULL, NULL) == QLA_ERROR) {
2047 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2048 "get_ddb_entry for fw_ddb_index %d\n",
2049 ha->host_no, __func__,
2050 ddb_entry->fw_ddb_index));
13483730 2051 goto exit_session_conn_param;
b3a271a9
MR
2052 }
2053
2054 cls_sess = ddb_entry->sess;
2055 sess = cls_sess->dd_data;
2056
2057 cls_conn = ddb_entry->conn;
2058 conn = cls_conn->dd_data;
2059
13483730
MC
2060 /* Update timers after login */
2061 ddb_entry->default_relogin_timeout =
2062 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2063 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2064 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
13483730
MC
2065 ddb_entry->default_time2wait =
2066 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2067
b3a271a9 2068 /* Update params */
376738af 2069 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
b3a271a9
MR
2070 conn->max_recv_dlength = BYTE_UNITS *
2071 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2072
2073 conn->max_xmit_dlength = BYTE_UNITS *
2074 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2075
2076 sess->initial_r2t_en =
2077 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2078
2079 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2080
2081 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2082
2083 sess->first_burst = BYTE_UNITS *
2084 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2085
2086 sess->max_burst = BYTE_UNITS *
2087 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2088
2089 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2090
2091 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2092
2093 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2094
2095 memcpy(sess->initiatorname, ha->name_string,
2096 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
13483730 2097
6c1b8789
VC
2098 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2099 (char *)fw_ddb_entry->iscsi_alias, 0);
2100
13483730
MC
2101exit_session_conn_param:
2102 if (fw_ddb_entry)
2103 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2104 fw_ddb_entry, fw_ddb_entry_dma);
b3a271a9
MR
2105}
2106
afaf5a2d
DS
2107/*
2108 * Timer routines
2109 */
2110
2111static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2112 unsigned long interval)
2113{
2114 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2115 __func__, ha->host->host_no));
2116 init_timer(&ha->timer);
2117 ha->timer.expires = jiffies + interval * HZ;
2118 ha->timer.data = (unsigned long)ha;
2119 ha->timer.function = (void (*)(unsigned long))func;
2120 add_timer(&ha->timer);
2121 ha->timer_active = 1;
2122}
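/*
 * The interval argument is in seconds and is converted to jiffies via
 * HZ before the timer is armed.  A hypothetical call arming a
 * one-second timer would look like:
 *
 *	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
 *
 * qla4xxx_timer() below then re-arms itself on every pass with
 * mod_timer(&ha->timer, jiffies + HZ).
 */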
2123
2124static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2125{
2126 del_timer_sync(&ha->timer);
2127 ha->timer_active = 0;
2128}
2129
2130/***
2131 * qla4xxx_mark_device_missing - blocks the session
2132 * @cls_session: Pointer to the session to be blocked
2134 *
2135 * This routine marks a device missing and closes its connection.
afaf5a2d 2136 **/
b3a271a9 2137void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
afaf5a2d 2138{
b3a271a9 2139 iscsi_block_session(cls_session);
afaf5a2d
DS
2140}
2141
f4f5df23
VC
2142/**
2143 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2144 * @ha: Pointer to host adapter structure.
2145 *
2146 * This routine marks all devices missing by blocking every session on the host.
2147 **/
2148void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2149{
b3a271a9 2150 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
f4f5df23
VC
2151}
2152
afaf5a2d
DS
2153static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2154 struct ddb_entry *ddb_entry,
8f0722ca 2155 struct scsi_cmnd *cmd)
afaf5a2d
DS
2156{
2157 struct srb *srb;
2158
2159 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2160 if (!srb)
2161 return srb;
2162
09a0f719 2163 kref_init(&srb->srb_ref);
afaf5a2d
DS
2164 srb->ha = ha;
2165 srb->ddb = ddb_entry;
2166 srb->cmd = cmd;
2167 srb->flags = 0;
5369887a 2168 CMD_SP(cmd) = (void *)srb;
afaf5a2d
DS
2169
2170 return srb;
2171}
2172
2173static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2174{
2175 struct scsi_cmnd *cmd = srb->cmd;
2176
2177 if (srb->flags & SRB_DMA_VALID) {
5f7186c8 2178 scsi_dma_unmap(cmd);
afaf5a2d
DS
2179 srb->flags &= ~SRB_DMA_VALID;
2180 }
5369887a 2181 CMD_SP(cmd) = NULL;
afaf5a2d
DS
2182}
2183
09a0f719 2184void qla4xxx_srb_compl(struct kref *ref)
afaf5a2d 2185{
09a0f719 2186 struct srb *srb = container_of(ref, struct srb, srb_ref);
afaf5a2d 2187 struct scsi_cmnd *cmd = srb->cmd;
09a0f719 2188 struct scsi_qla_host *ha = srb->ha;
afaf5a2d
DS
2189
2190 qla4xxx_srb_free_dma(ha, srb);
2191
2192 mempool_free(srb, ha->srb_mempool);
2193
2194 cmd->scsi_done(cmd);
2195}
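/*
 * srb reference counting, in brief: qla4xxx_get_new_srb() does
 * kref_init(&srb->srb_ref), and the completion/abort paths drop their
 * reference with kref_put(&srb->srb_ref, qla4xxx_srb_compl) (see
 * qla4xxx_abort_active_cmds() below), so the DMA unmap, mempool_free()
 * and scsi_done() above run exactly once, when the last reference is
 * released.
 */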
2196
2197/**
2198 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
8f0722ca 2199 * @host: scsi host
afaf5a2d 2200 * @cmd: Pointer to Linux's SCSI command structure
afaf5a2d
DS
2201 *
2202 * Remarks:
2203 * This routine is invoked by Linux to send a SCSI command to the driver.
2204 * The mid-level driver tries to ensure that queuecommand never gets
2205 * invoked concurrently with itself or the interrupt handler (although
2206 * the interrupt handler may call this routine as part of request-
2207 * completion handling). Unfortunately, it sometimes calls the scheduler
2208 * in interrupt context, which is a big NO! NO!.
2209 **/
8f0722ca 2210static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
afaf5a2d 2211{
8f0722ca 2212 struct scsi_qla_host *ha = to_qla_host(host);
afaf5a2d 2213 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7fb1921b 2214 struct iscsi_cls_session *sess = ddb_entry->sess;
afaf5a2d
DS
2215 struct srb *srb;
2216 int rval;
2217
2232be0d
LC
2218 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2219 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2220 cmd->result = DID_NO_CONNECT << 16;
2221 else
2222 cmd->result = DID_REQUEUE << 16;
2223 goto qc_fail_command;
2224 }
2225
7fb1921b
MC
2226 if (!sess) {
2227 cmd->result = DID_IMM_RETRY << 16;
2228 goto qc_fail_command;
2229 }
2230
2231 rval = iscsi_session_chkready(sess);
2232 if (rval) {
2233 cmd->result = rval;
2234 goto qc_fail_command;
2235 }
2236
f4f5df23
VC
2237 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2238 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2239 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2240 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2241 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2242 !test_bit(AF_ONLINE, &ha->flags) ||
b3a271a9 2243 !test_bit(AF_LINK_UP, &ha->flags) ||
f4f5df23 2244 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2245 goto qc_host_busy;
2246
8f0722ca 2247 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
afaf5a2d 2248 if (!srb)
8f0722ca 2249 goto qc_host_busy;
afaf5a2d
DS
2250
2251 rval = qla4xxx_send_command_to_isp(ha, srb);
2252 if (rval != QLA_SUCCESS)
2253 goto qc_host_busy_free_sp;
2254
afaf5a2d
DS
2255 return 0;
2256
2257qc_host_busy_free_sp:
2258 qla4xxx_srb_free_dma(ha, srb);
2259 mempool_free(srb, ha->srb_mempool);
2260
afaf5a2d
DS
2261qc_host_busy:
2262 return SCSI_MLQUEUE_HOST_BUSY;
2263
2264qc_fail_command:
8f0722ca 2265 cmd->scsi_done(cmd);
afaf5a2d
DS
2266
2267 return 0;
2268}
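/*
 * Return convention used above: 0 means the command was either handed
 * to the ISP or completed immediately via cmd->scsi_done() with a
 * result such as DID_NO_CONNECT, DID_REQUEUE or DID_IMM_RETRY, while
 * SCSI_MLQUEUE_HOST_BUSY asks the midlayer to retry later (adapter
 * resetting, link down, or no srb available).
 */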
2269
2270/**
2271 * qla4xxx_mem_free - frees memory allocated to adapter
2272 * @ha: Pointer to host adapter structure.
2273 *
2274 * Frees memory previously allocated by qla4xxx_mem_alloc
2275 **/
2276static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2277{
2278 if (ha->queues)
2279 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2280 ha->queues_dma);
2281
068237c8
TP
2282 if (ha->fw_dump)
2283 vfree(ha->fw_dump);
2284
afaf5a2d
DS
2285 ha->queues_len = 0;
2286 ha->queues = NULL;
2287 ha->queues_dma = 0;
2288 ha->request_ring = NULL;
2289 ha->request_dma = 0;
2290 ha->response_ring = NULL;
2291 ha->response_dma = 0;
2292 ha->shadow_regs = NULL;
2293 ha->shadow_regs_dma = 0;
068237c8
TP
2294 ha->fw_dump = NULL;
2295 ha->fw_dump_size = 0;
afaf5a2d
DS
2296
2297 /* Free srb pool. */
2298 if (ha->srb_mempool)
2299 mempool_destroy(ha->srb_mempool);
2300
2301 ha->srb_mempool = NULL;
2302
b3a271a9
MR
2303 if (ha->chap_dma_pool)
2304 dma_pool_destroy(ha->chap_dma_pool);
2305
4549415a
LC
2306 if (ha->chap_list)
2307 vfree(ha->chap_list);
2308 ha->chap_list = NULL;
2309
13483730
MC
2310 if (ha->fw_ddb_dma_pool)
2311 dma_pool_destroy(ha->fw_ddb_dma_pool);
2312
afaf5a2d 2313 /* release io space registers */
f4f5df23
VC
2314 if (is_qla8022(ha)) {
2315 if (ha->nx_pcibase)
2316 iounmap(
2317 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
f4f5df23 2318 } else if (ha->reg)
afaf5a2d
DS
2319 iounmap(ha->reg);
2320 pci_release_regions(ha->pdev);
2321}
2322
2323/**
2324 * qla4xxx_mem_alloc - allocates memory for use by adapter.
2325 * @ha: Pointer to host adapter structure
2326 *
2327 * Allocates DMA memory for request and response queues. Also allocates memory
2328 * for srbs.
2329 **/
2330static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2331{
2332 unsigned long align;
2333
2334 /* Allocate contiguous block of DMA memory for queues. */
2335 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2336 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2337 sizeof(struct shadow_regs) +
2338 MEM_ALIGN_VALUE +
2339 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2340 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2341 &ha->queues_dma, GFP_KERNEL);
2342 if (ha->queues == NULL) {
c2660df3
VC
2343 ql4_printk(KERN_WARNING, ha,
2344 "Memory Allocation failed - queues.\n");
afaf5a2d
DS
2345
2346 goto mem_alloc_error_exit;
2347 }
2348 memset(ha->queues, 0, ha->queues_len);
2349
2350 /*
2351 * As per RISC alignment requirements -- the bus-address must be a
2352 * multiple of the request-ring size (in bytes).
2353 */
2354 align = 0;
2355 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2356 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2357 (MEM_ALIGN_VALUE - 1));
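	/*
	 * Worked example of the fixup above, assuming MEM_ALIGN_VALUE is
	 * a power of two: if queues_dma ends in 0x30 and MEM_ALIGN_VALUE
	 * is 0x40, then align = 0x40 - 0x30 = 0x10, so request_dma below
	 * lands on the next 0x40-byte boundary.
	 */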
2358
2359 /* Update request and response queue pointers. */
2360 ha->request_dma = ha->queues_dma + align;
2361 ha->request_ring = (struct queue_entry *) (ha->queues + align);
2362 ha->response_dma = ha->queues_dma + align +
2363 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2364 ha->response_ring = (struct queue_entry *) (ha->queues + align +
2365 (REQUEST_QUEUE_DEPTH *
2366 QUEUE_SIZE));
2367 ha->shadow_regs_dma = ha->queues_dma + align +
2368 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2369 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2370 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2371 (REQUEST_QUEUE_DEPTH *
2372 QUEUE_SIZE) +
2373 (RESPONSE_QUEUE_DEPTH *
2374 QUEUE_SIZE));
2375
2376 /* Allocate memory for srb pool. */
2377 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2378 mempool_free_slab, srb_cachep);
2379 if (ha->srb_mempool == NULL) {
c2660df3
VC
2380 ql4_printk(KERN_WARNING, ha,
2381 "Memory Allocation failed - SRB Pool.\n");
afaf5a2d
DS
2382
2383 goto mem_alloc_error_exit;
2384 }
2385
b3a271a9
MR
2386 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2387 CHAP_DMA_BLOCK_SIZE, 8, 0);
2388
2389 if (ha->chap_dma_pool == NULL) {
2390 ql4_printk(KERN_WARNING, ha,
2391 "%s: chap_dma_pool allocation failed..\n", __func__);
2392 goto mem_alloc_error_exit;
2393 }
2394
13483730
MC
2395 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2396 DDB_DMA_BLOCK_SIZE, 8, 0);
2397
2398 if (ha->fw_ddb_dma_pool == NULL) {
2399 ql4_printk(KERN_WARNING, ha,
2400 "%s: fw_ddb_dma_pool allocation failed..\n",
2401 __func__);
2402 goto mem_alloc_error_exit;
2403 }
2404
afaf5a2d
DS
2405 return QLA_SUCCESS;
2406
2407mem_alloc_error_exit:
2408 qla4xxx_mem_free(ha);
2409 return QLA_ERROR;
2410}
2411
4f77083e
MH
2412/**
2413 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2414 * @ha: adapter block pointer.
2415 *
2416 * Note: The caller should not hold the idc lock.
2417 **/
2418static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2419{
2420 uint32_t temp, temp_state, temp_val;
2421 int status = QLA_SUCCESS;
2422
2423 temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
2424
2425 temp_state = qla82xx_get_temp_state(temp);
2426 temp_val = qla82xx_get_temp_val(temp);
2427
2428 if (temp_state == QLA82XX_TEMP_PANIC) {
2429 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2430 " exceeds maximum allowed. Hardware has been shut"
2431 " down.\n", temp_val);
2432 status = QLA_ERROR;
2433 } else if (temp_state == QLA82XX_TEMP_WARN) {
2434 if (ha->temperature == QLA82XX_TEMP_NORMAL)
2435 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2436 " degrees C exceeds operating range."
2437 " Immediate action needed.\n", temp_val);
2438 } else {
2439 if (ha->temperature == QLA82XX_TEMP_WARN)
2440 ql4_printk(KERN_INFO, ha, "Device temperature is"
2441 " now %d degrees C in normal range.\n",
2442 temp_val);
2443 }
2444 ha->temperature = temp_state;
2445 return status;
2446}
2447
f4f5df23
VC
2448/**
2449 * qla4_8xxx_check_fw_alive - Check firmware health
2450 * @ha: Pointer to host adapter structure.
2451 *
2452 * Context: Interrupt
2453 **/
9ee91a38 2454static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
f4f5df23 2455{
9ee91a38
SS
2456 uint32_t fw_heartbeat_counter;
2457 int status = QLA_SUCCESS;
f4f5df23
VC
2458
2459 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2232be0d
LC
2460 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2461 if (fw_heartbeat_counter == 0xffffffff) {
2462 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2463 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2464 ha->host_no, __func__));
9ee91a38 2465 return status;
2232be0d 2466 }
f4f5df23
VC
2467
2468 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2469 ha->seconds_since_last_heartbeat++;
2470 /* FW not alive after 2 seconds */
2471 if (ha->seconds_since_last_heartbeat == 2) {
2472 ha->seconds_since_last_heartbeat = 0;
68d92ebf
VC
2473
2474 ql4_printk(KERN_INFO, ha,
2475 "scsi(%ld): %s, Dumping hw/fw registers:\n "
2476 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
2477 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
2478 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
2479 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
2480 ha->host_no, __func__,
2481 qla4_8xxx_rd_32(ha,
2482 QLA82XX_PEG_HALT_STATUS1),
2483 qla4_8xxx_rd_32(ha,
2484 QLA82XX_PEG_HALT_STATUS2),
2485 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
2486 0x3c),
2487 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
2488 0x3c),
2489 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
2490 0x3c),
2491 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
2492 0x3c),
2493 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
2494 0x3c));
9ee91a38 2495 status = QLA_ERROR;
f4f5df23 2496 }
99457d75
LC
2497 } else
2498 ha->seconds_since_last_heartbeat = 0;
2499
f4f5df23 2500 ha->fw_heartbeat_counter = fw_heartbeat_counter;
9ee91a38 2501 return status;
f4f5df23
VC
2502}
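/*
 * In short: the PEG alive counter is sampled once per second from the
 * watchdog; if it fails to advance on two consecutive polls, the
 * routine dumps the PEG halt/PC registers and returns QLA_ERROR so the
 * caller can pause the NIU ports and schedule a reset.
 */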
2503
2504/**
2505 * qla4_8xxx_watchdog - Poll dev state
2506 * @ha: Pointer to host adapter structure.
2507 *
2508 * Context: Interrupt
2509 **/
2510void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2511{
9ee91a38 2512 uint32_t dev_state, halt_status;
f4f5df23
VC
2513
2514 /* don't poll if reset is going on */
d56a1f7b
LC
2515 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2516 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
977f46a4 2517 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
9ee91a38 2518 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
4f77083e
MH
2519
2520 if (qla4_8xxx_check_temp(ha)) {
e6bd0ebd
GM
2521 ql4_printk(KERN_INFO, ha, "disabling pause"
2522 " transmit on port 0 & 1.\n");
2523 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2524 CRB_NIU_XG_PAUSE_CTL_P0 |
2525 CRB_NIU_XG_PAUSE_CTL_P1);
4f77083e
MH
2526 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2527 qla4xxx_wake_dpc(ha);
2528 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
f4f5df23 2529 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3930b8c1
VC
2530 if (!ql4xdontresethba) {
2531 ql4_printk(KERN_INFO, ha, "%s: HW State: "
2532 "NEED RESET!\n", __func__);
2533 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2534 qla4xxx_wake_dpc(ha);
3930b8c1 2535 }
f4f5df23
VC
2536 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
2537 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3930b8c1
VC
2538 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2539 __func__);
f4f5df23
VC
2540 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
2541 qla4xxx_wake_dpc(ha);
2542 } else {
2543 /* Check firmware health */
9ee91a38 2544 if (qla4_8xxx_check_fw_alive(ha)) {
e6bd0ebd
GM
2545 ql4_printk(KERN_INFO, ha, "disabling pause"
2546 " transmit on port 0 & 1.\n");
2547 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2548 CRB_NIU_XG_PAUSE_CTL_P0 |
2549 CRB_NIU_XG_PAUSE_CTL_P1);
9ee91a38
SS
2550 halt_status = qla4_8xxx_rd_32(ha,
2551 QLA82XX_PEG_HALT_STATUS1);
2552
46801ba6 2553 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2554 ql4_printk(KERN_ERR, ha, "%s:"
2555 " Firmware aborted with"
2556 " error code 0x00006700."
2557 " Device is being reset\n",
2558 __func__);
2559
9ee91a38
SS
2560 /* Since we cannot change dev_state in interrupt
2561 * context, set appropriate DPC flag then wakeup
2562 * DPC */
2563 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2564 set_bit(DPC_HA_UNRECOVERABLE,
2565 &ha->dpc_flags);
2566 else {
2567 ql4_printk(KERN_INFO, ha, "%s: detect "
2568 "abort needed!\n", __func__);
2569 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2570 }
2571 qla4xxx_mailbox_premature_completion(ha);
2572 qla4xxx_wake_dpc(ha);
2573 }
f4f5df23
VC
2574 }
2575 }
2576}
2577
4a4bc2e9 2578static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
13483730
MC
2579{
2580 struct iscsi_session *sess;
2581 struct ddb_entry *ddb_entry;
2582 struct scsi_qla_host *ha;
2583
2584 sess = cls_sess->dd_data;
2585 ddb_entry = sess->dd_data;
2586 ha = ddb_entry->ha;
2587
2588 if (!(ddb_entry->ddb_type == FLASH_DDB))
2589 return;
2590
2591 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
2592 !iscsi_is_session_online(cls_sess)) {
2593 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
2594 INVALID_ENTRY) {
2595 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
2596 0) {
2597 atomic_set(&ddb_entry->retry_relogin_timer,
2598 INVALID_ENTRY);
2599 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2600 set_bit(DF_RELOGIN, &ddb_entry->flags);
2601 DEBUG2(ql4_printk(KERN_INFO, ha,
2602 "%s: index [%d] login device\n",
2603 __func__, ddb_entry->fw_ddb_index));
2604 } else
2605 atomic_dec(&ddb_entry->retry_relogin_timer);
2606 }
2607 }
2608
2609	/* Wait for the relogin to time out */
2610 if (atomic_read(&ddb_entry->relogin_timer) &&
2611 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
2612 /*
2613 * If the relogin times out and the device is
2614 * still NOT ONLINE then try and relogin again.
2615 */
2616 if (!iscsi_is_session_online(cls_sess)) {
2617 /* Reset retry relogin timer */
2618 atomic_inc(&ddb_entry->relogin_retry_count);
2619 DEBUG2(ql4_printk(KERN_INFO, ha,
2620 "%s: index[%d] relogin timed out-retrying"
2621 " relogin (%d), retry (%d)\n", __func__,
2622 ddb_entry->fw_ddb_index,
2623 atomic_read(&ddb_entry->relogin_retry_count),
2624 ddb_entry->default_time2wait + 4));
2625 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2626 atomic_set(&ddb_entry->retry_relogin_timer,
2627 ddb_entry->default_time2wait + 4);
2628 }
2629 }
2630}
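/*
 * Two counters drive the flash DDB relogin handling above:
 * retry_relogin_timer counts down once per second until it reaches
 * zero, at which point DF_RELOGIN and DPC_RELOGIN_DEVICE are set so
 * the DPC performs the login; relogin_timer then bounds how long that
 * attempt may take, and if it expires with the session still offline
 * the retry counter is re-armed with default_time2wait + 4 seconds.
 */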
2631
afaf5a2d
DS
2632/**
2633 * qla4xxx_timer - checks every second for work to do.
2634 * @ha: Pointer to host adapter structure.
2635 **/
2636static void qla4xxx_timer(struct scsi_qla_host *ha)
2637{
afaf5a2d 2638 int start_dpc = 0;
2232be0d
LC
2639 uint16_t w;
2640
13483730
MC
2641 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
2642
2232be0d
LC
2643 /* If we are in the middle of AER/EEH processing
2644 * skip any processing and reschedule the timer
2645 */
2646 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2647 mod_timer(&ha->timer, jiffies + HZ);
2648 return;
2649 }
2650
2651 /* Hardware read to trigger an EEH error during mailbox waits. */
2652 if (!pci_channel_offline(ha->pdev))
2653 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
afaf5a2d 2654
f4f5df23
VC
2655 if (is_qla8022(ha)) {
2656 qla4_8xxx_watchdog(ha);
2657 }
2658
f4f5df23
VC
2659 if (!is_qla8022(ha)) {
2660 /* Check for heartbeat interval. */
2661 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
2662 ha->heartbeat_interval != 0) {
2663 ha->seconds_since_last_heartbeat++;
2664 if (ha->seconds_since_last_heartbeat >
2665 ha->heartbeat_interval + 2)
2666 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2667 }
afaf5a2d
DS
2668 }
2669
ff884430
VC
2670 /* Process any deferred work. */
2671 if (!list_empty(&ha->work_list))
2672 start_dpc++;
2673
afaf5a2d 2674 /* Wakeup the dpc routine for this adapter, if needed. */
1b46807e 2675 if (start_dpc ||
afaf5a2d
DS
2676 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2677 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
2678 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
f4f5df23 2679 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
afaf5a2d
DS
2680 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2681 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
065aa1b4 2682 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
f4f5df23
VC
2683 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2684 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1b46807e 2685 test_bit(DPC_AEN, &ha->dpc_flags)) {
afaf5a2d
DS
2686 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
2687 " - dpc flags = 0x%lx\n",
2688 ha->host_no, __func__, ha->dpc_flags));
f4f5df23 2689 qla4xxx_wake_dpc(ha);
afaf5a2d
DS
2690 }
2691
2692 /* Reschedule timer thread to call us back in one second */
2693 mod_timer(&ha->timer, jiffies + HZ);
2694
2695 DEBUG2(ha->seconds_since_last_intr++);
2696}
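/*
 * The timer itself never does heavy lifting: it samples adapter health
 * (ISP82xx watchdog, ISP4xxx heartbeat, relogin counters), sets the
 * appropriate dpc_flags bits, wakes the DPC workqueue, and re-arms
 * itself one second out with mod_timer(&ha->timer, jiffies + HZ).
 */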
2697
2698/**
2699 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
2700 * @ha: Pointer to host adapter structure.
2701 *
2702 * This routine stalls the driver until all outstanding commands are returned.
2703 * Caller must release the Hardware Lock prior to calling this routine.
2704 **/
2705static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
2706{
2707 uint32_t index = 0;
afaf5a2d
DS
2708 unsigned long flags;
2709 struct scsi_cmnd *cmd;
afaf5a2d 2710
f4f5df23
VC
2711 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
2712
2713 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
2714 "complete\n", WAIT_CMD_TOV));
2715
2716 while (!time_after_eq(jiffies, wtime)) {
afaf5a2d
DS
2717 spin_lock_irqsave(&ha->hardware_lock, flags);
2718 /* Find a command that hasn't completed. */
2719 for (index = 0; index < ha->host->can_queue; index++) {
2720 cmd = scsi_host_find_tag(ha->host, index);
a1e0063d
MC
2721 /*
2722 * We cannot just check if the index is valid,
2723	 * because if we are run from the scsi eh, then
2724 * the scsi/block layer is going to prevent
2725 * the tag from being released.
2726 */
2727 if (cmd != NULL && CMD_SP(cmd))
afaf5a2d
DS
2728 break;
2729 }
2730 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2731
2732 /* If No Commands are pending, wait is complete */
f4f5df23
VC
2733 if (index == ha->host->can_queue)
2734 return QLA_SUCCESS;
afaf5a2d 2735
f4f5df23
VC
2736 msleep(1000);
2737 }
2738 /* If we timed out on waiting for commands to come back
2739 * return ERROR. */
2740 return QLA_ERROR;
afaf5a2d
DS
2741}
2742
f4f5df23 2743int qla4xxx_hw_reset(struct scsi_qla_host *ha)
afaf5a2d 2744{
afaf5a2d 2745 uint32_t ctrl_status;
477ffb9d
DS
2746 unsigned long flags = 0;
2747
2748 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
afaf5a2d 2749
f4f5df23
VC
2750 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
2751 return QLA_ERROR;
2752
afaf5a2d
DS
2753 spin_lock_irqsave(&ha->hardware_lock, flags);
2754
2755 /*
2756 * If the SCSI Reset Interrupt bit is set, clear it.
2757 * Otherwise, the Soft Reset won't work.
2758 */
2759 ctrl_status = readw(&ha->reg->ctrl_status);
2760 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
2761 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2762
2763 /* Issue Soft Reset */
2764 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
2765 readl(&ha->reg->ctrl_status);
2766
2767 spin_unlock_irqrestore(&ha->hardware_lock, flags);
f4f5df23 2768 return QLA_SUCCESS;
477ffb9d
DS
2769}
2770
2771/**
2772 * qla4xxx_soft_reset - performs soft reset.
2773 * @ha: Pointer to host adapter structure.
2774 **/
2775int qla4xxx_soft_reset(struct scsi_qla_host *ha)
2776{
2777 uint32_t max_wait_time;
2778 unsigned long flags = 0;
f931c534 2779 int status;
477ffb9d
DS
2780 uint32_t ctrl_status;
2781
f931c534
VC
2782 status = qla4xxx_hw_reset(ha);
2783 if (status != QLA_SUCCESS)
2784 return status;
afaf5a2d 2785
f931c534 2786 status = QLA_ERROR;
afaf5a2d
DS
2787 /* Wait until the Network Reset Intr bit is cleared */
2788 max_wait_time = RESET_INTR_TOV;
2789 do {
2790 spin_lock_irqsave(&ha->hardware_lock, flags);
2791 ctrl_status = readw(&ha->reg->ctrl_status);
2792 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2793
2794 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
2795 break;
2796
2797 msleep(1000);
2798 } while ((--max_wait_time));
2799
2800 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
2801 DEBUG2(printk(KERN_WARNING
2802 "scsi%ld: Network Reset Intr not cleared by "
2803 "Network function, clearing it now!\n",
2804 ha->host_no));
2805 spin_lock_irqsave(&ha->hardware_lock, flags);
2806 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
2807 readl(&ha->reg->ctrl_status);
2808 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2809 }
2810
2811 /* Wait until the firmware tells us the Soft Reset is done */
2812 max_wait_time = SOFT_RESET_TOV;
2813 do {
2814 spin_lock_irqsave(&ha->hardware_lock, flags);
2815 ctrl_status = readw(&ha->reg->ctrl_status);
2816 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2817
2818 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
2819 status = QLA_SUCCESS;
2820 break;
2821 }
2822
2823 msleep(1000);
2824 } while ((--max_wait_time));
2825
2826 /*
2827 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
2828 * after the soft reset has taken place.
2829 */
2830 spin_lock_irqsave(&ha->hardware_lock, flags);
2831 ctrl_status = readw(&ha->reg->ctrl_status);
2832 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2833 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2834 readl(&ha->reg->ctrl_status);
2835 }
2836 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2837
2838	/* If the soft reset fails, then most probably the BIOS on the other
2839	 * function is also enabled.
2840	 * Since the initialization is sequential, the other fn
2841	 * won't be able to acknowledge the soft reset.
2842	 * Issue a force soft reset to work around this scenario.
2843 */
2844 if (max_wait_time == 0) {
2845 /* Issue Force Soft Reset */
2846 spin_lock_irqsave(&ha->hardware_lock, flags);
2847 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2848 readl(&ha->reg->ctrl_status);
2849 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2850 /* Wait until the firmware tells us the Soft Reset is done */
2851 max_wait_time = SOFT_RESET_TOV;
2852 do {
2853 spin_lock_irqsave(&ha->hardware_lock, flags);
2854 ctrl_status = readw(&ha->reg->ctrl_status);
2855 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2856
2857 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2858 status = QLA_SUCCESS;
2859 break;
2860 }
2861
2862 msleep(1000);
2863 } while ((--max_wait_time));
2864 }
2865
2866 return status;
2867}
2868
afaf5a2d 2869/**
f4f5df23 2870 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
afaf5a2d 2871 * @ha: Pointer to host adapter structure.
f4f5df23 2872 * @res: returned scsi status
2873 *
2874 * This routine is called just prior to a HARD RESET to return all
2875 * outstanding commands back to the Operating System.
2876 * The caller should make sure that the following locks are released
2877 * before calling this routine: hardware_lock and io_request_lock.
2878 **/
f4f5df23 2879static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
afaf5a2d
DS
2880{
2881 struct srb *srb;
2882 int i;
2883 unsigned long flags;
2884
2885 spin_lock_irqsave(&ha->hardware_lock, flags);
2886 for (i = 0; i < ha->host->can_queue; i++) {
2887 srb = qla4xxx_del_from_active_array(ha, i);
2888 if (srb != NULL) {
f4f5df23 2889 srb->cmd->result = res;
09a0f719 2890 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
afaf5a2d
DS
2891 }
2892 }
2893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
afaf5a2d
DS
2894}
2895
f4f5df23
VC
2896void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2897{
2898 clear_bit(AF_ONLINE, &ha->flags);
2899
2900 /* Disable the board */
2901 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
f4f5df23
VC
2902
2903 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2904 qla4xxx_mark_all_devices_missing(ha);
2905 clear_bit(AF_INIT_DONE, &ha->flags);
2906}
2907
b3a271a9
MR
2908static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2909{
2910 struct iscsi_session *sess;
2911 struct ddb_entry *ddb_entry;
2912
2913 sess = cls_session->dd_data;
2914 ddb_entry = sess->dd_data;
2915 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
13483730
MC
2916
2917 if (ddb_entry->ddb_type == FLASH_DDB)
2918 iscsi_block_session(ddb_entry->sess);
2919 else
2920 iscsi_session_failure(cls_session->dd_data,
2921 ISCSI_ERR_CONN_FAILED);
b3a271a9
MR
2922}
2923
afaf5a2d
DS
2924/**
2925 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2926 * @ha: Pointer to host adapter structure.
afaf5a2d 2927 **/
f4f5df23 2928static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
afaf5a2d 2929{
f4f5df23
VC
2930 int status = QLA_ERROR;
2931 uint8_t reset_chip = 0;
8e0f3a66 2932 uint32_t dev_state;
9ee91a38 2933 unsigned long wait;
afaf5a2d
DS
2934
2935 /* Stall incoming I/O until we are done */
f4f5df23 2936 scsi_block_requests(ha->host);
afaf5a2d 2937 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9 2938 clear_bit(AF_LINK_UP, &ha->flags);
50a29aec 2939
f4f5df23 2940 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
afaf5a2d 2941
f4f5df23 2942 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
afaf5a2d 2943
b3a271a9
MR
2944 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2945
f4f5df23
VC
2946 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2947 reset_chip = 1;
afaf5a2d 2948
f4f5df23
VC
2949 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2950 * do not reset adapter, jump to initialize_adapter */
2951 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2952 status = QLA_SUCCESS;
2953 goto recover_ha_init_adapter;
2954 }
afaf5a2d 2955
f4f5df23
VC
2956 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2957 * from eh_host_reset or ioctl module */
2958 if (is_qla8022(ha) && !reset_chip &&
2959 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2960
2961 DEBUG2(ql4_printk(KERN_INFO, ha,
2962 "scsi%ld: %s - Performing stop_firmware...\n",
2963 ha->host_no, __func__));
2964 status = ha->isp_ops->reset_firmware(ha);
2965 if (status == QLA_SUCCESS) {
2bd1e2be
NJ
2966 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2967 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2968 ha->isp_ops->disable_intrs(ha);
2969 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2970 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2971 } else {
2972 /* If the stop_firmware fails then
2973 * reset the entire chip */
2974 reset_chip = 1;
2975 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2976 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2977 }
2978 }
dca05c4c 2979
f4f5df23
VC
2980 /* Issue full chip reset if recovering from a catastrophic error,
2981 * or if stop_firmware fails for ISP-82xx.
2982 * This is the default case for ISP-4xxx */
2983 if (!is_qla8022(ha) || reset_chip) {
9ee91a38
SS
2984 if (!is_qla8022(ha))
2985 goto chip_reset;
2986
2987 /* Check if 82XX firmware is alive or not
2988 * We may have arrived here from NEED_RESET
2989 * detection only */
2990 if (test_bit(AF_FW_RECOVERY, &ha->flags))
2991 goto chip_reset;
2992
2993 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
2994 while (time_before(jiffies, wait)) {
2995 if (qla4_8xxx_check_fw_alive(ha)) {
2996 qla4xxx_mailbox_premature_completion(ha);
2997 break;
2998 }
2999
3000 set_current_state(TASK_UNINTERRUPTIBLE);
3001 schedule_timeout(HZ);
3002 }
da106212 3003chip_reset:
2bd1e2be
NJ
3004 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3005 qla4xxx_cmd_wait(ha);
da106212 3006
f4f5df23
VC
3007 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3008 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3009 DEBUG2(ql4_printk(KERN_INFO, ha,
3010 "scsi%ld: %s - Performing chip reset..\n",
3011 ha->host_no, __func__));
3012 status = ha->isp_ops->reset_chip(ha);
3013 }
afaf5a2d
DS
3014
3015 /* Flush any pending ddb changed AENs */
3016 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3017
f4f5df23
VC
3018recover_ha_init_adapter:
3019 /* Upon successful firmware/chip reset, re-initialize the adapter */
afaf5a2d 3020 if (status == QLA_SUCCESS) {
f4f5df23
VC
3021 /* For ISP-4xxx, force function 1 to always initialize
3022	 * before function 3 to prevent both functions from
3023 * stepping on top of the other */
3024 if (!is_qla8022(ha) && (ha->mac_index == 3))
3025 ssleep(6);
3026
3027 /* NOTE: AF_ONLINE flag set upon successful completion of
3028 * qla4xxx_initialize_adapter */
13483730 3029 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
afaf5a2d
DS
3030 }
3031
f4f5df23
VC
3032 /* Retry failed adapter initialization, if necessary
3033 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3034 * case to prevent ping-pong resets between functions */
3035 if (!test_bit(AF_ONLINE, &ha->flags) &&
3036 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 3037 /* Adapter initialization failed, see if we can retry
f4f5df23
VC
3038 * resetting the ha.
3039 * Since we don't want to block the DPC for too long
3040 * with multiple resets in the same thread,
3041 * utilize DPC to retry */
8e0f3a66
SR
3042 if (is_qla8022(ha)) {
3043 qla4_8xxx_idc_lock(ha);
3044 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3045 qla4_8xxx_idc_unlock(ha);
3046 if (dev_state == QLA82XX_DEV_FAILED) {
3047 ql4_printk(KERN_INFO, ha, "%s: don't retry "
3048 "recover adapter. H/W is in Failed "
3049 "state\n", __func__);
3050 qla4xxx_dead_adapter_cleanup(ha);
3051 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3052 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3053 clear_bit(DPC_RESET_HA_FW_CONTEXT,
3054 &ha->dpc_flags);
3055 status = QLA_ERROR;
3056
3057 goto exit_recover;
3058 }
3059 }
3060
afaf5a2d
DS
3061 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3062 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3063 DEBUG2(printk("scsi%ld: recover adapter - retrying "
3064 "(%d) more times\n", ha->host_no,
3065 ha->retry_reset_ha_cnt));
3066 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3067 status = QLA_ERROR;
3068 } else {
3069 if (ha->retry_reset_ha_cnt > 0) {
3070 /* Schedule another Reset HA--DPC will retry */
3071 ha->retry_reset_ha_cnt--;
3072 DEBUG2(printk("scsi%ld: recover adapter - "
3073 "retry remaining %d\n",
3074 ha->host_no,
3075 ha->retry_reset_ha_cnt));
3076 status = QLA_ERROR;
3077 }
3078
3079 if (ha->retry_reset_ha_cnt == 0) {
3080 /* Recover adapter retries have been exhausted.
3081 * Adapter DEAD */
3082 DEBUG2(printk("scsi%ld: recover adapter "
3083 "failed - board disabled\n",
3084 ha->host_no));
f4f5df23 3085 qla4xxx_dead_adapter_cleanup(ha);
afaf5a2d
DS
3086 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3087 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 3088 clear_bit(DPC_RESET_HA_FW_CONTEXT,
afaf5a2d
DS
3089 &ha->dpc_flags);
3090 status = QLA_ERROR;
3091 }
3092 }
3093 } else {
3094 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 3095 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
afaf5a2d
DS
3096 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3097 }
3098
8e0f3a66 3099exit_recover:
afaf5a2d
DS
3100 ha->adapter_error_count++;
3101
f4f5df23
VC
3102 if (test_bit(AF_ONLINE, &ha->flags))
3103 ha->isp_ops->enable_intrs(ha);
3104
3105 scsi_unblock_requests(ha->host);
3106
3107 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3108 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
25985edc 3109 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
afaf5a2d 3110
afaf5a2d
DS
3111 return status;
3112}
3113
b3a271a9 3114static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2d7924e6 3115{
b3a271a9
MR
3116 struct iscsi_session *sess;
3117 struct ddb_entry *ddb_entry;
3118 struct scsi_qla_host *ha;
2d7924e6 3119
b3a271a9
MR
3120 sess = cls_session->dd_data;
3121 ddb_entry = sess->dd_data;
3122 ha = ddb_entry->ha;
3123 if (!iscsi_is_session_online(cls_session)) {
3124 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3125 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3126 " unblock session\n", ha->host_no, __func__,
3127 ddb_entry->fw_ddb_index);
3128 iscsi_unblock_session(ddb_entry->sess);
3129 } else {
3130 /* Trigger relogin */
13483730
MC
3131 if (ddb_entry->ddb_type == FLASH_DDB) {
3132 if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
3133 qla4xxx_arm_relogin_timer(ddb_entry);
3134 } else
3135 iscsi_session_failure(cls_session->dd_data,
3136 ISCSI_ERR_CONN_FAILED);
2d7924e6
VC
3137 }
3138 }
3139}
3140
13483730
MC
3141int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3142{
3143 struct iscsi_session *sess;
3144 struct ddb_entry *ddb_entry;
3145 struct scsi_qla_host *ha;
3146
3147 sess = cls_session->dd_data;
3148 ddb_entry = sess->dd_data;
3149 ha = ddb_entry->ha;
3150 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3151 " unblock session\n", ha->host_no, __func__,
3152 ddb_entry->fw_ddb_index);
3153
3154 iscsi_unblock_session(ddb_entry->sess);
3155
3156 /* Start scan target */
3157 if (test_bit(AF_ONLINE, &ha->flags)) {
3158 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3159 " start scan\n", ha->host_no, __func__,
3160 ddb_entry->fw_ddb_index);
3161 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3162 }
3163 return QLA_SUCCESS;
3164}
3165
3166int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3167{
3168 struct iscsi_session *sess;
3169 struct ddb_entry *ddb_entry;
3170 struct scsi_qla_host *ha;
80c53e64 3171 int status = QLA_SUCCESS;
13483730
MC
3172
3173 sess = cls_session->dd_data;
3174 ddb_entry = sess->dd_data;
3175 ha = ddb_entry->ha;
3176 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3177 " unblock user space session\n", ha->host_no, __func__,
3178 ddb_entry->fw_ddb_index);
13483730 3179
80c53e64
MR
3180 if (!iscsi_is_session_online(cls_session)) {
3181 iscsi_conn_start(ddb_entry->conn);
3182 iscsi_conn_login_event(ddb_entry->conn,
3183 ISCSI_CONN_STATE_LOGGED_IN);
3184 } else {
3185 ql4_printk(KERN_INFO, ha,
3186 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3187 ha->host_no, __func__, ddb_entry->fw_ddb_index,
3188 cls_session->sid);
3189 status = QLA_ERROR;
3190 }
3191
3192 return status;
13483730
MC
3193}
3194
b3a271a9
MR
3195static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3196{
3197 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3198}
3199
13483730
MC
3200static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3201{
3202 uint16_t relogin_timer;
3203 struct iscsi_session *sess;
3204 struct ddb_entry *ddb_entry;
3205 struct scsi_qla_host *ha;
3206
3207 sess = cls_sess->dd_data;
3208 ddb_entry = sess->dd_data;
3209 ha = ddb_entry->ha;
3210
3211 relogin_timer = max(ddb_entry->default_relogin_timeout,
3212 (uint16_t)RELOGIN_TOV);
3213 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3214
3215 DEBUG2(ql4_printk(KERN_INFO, ha,
3216 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3217 ddb_entry->fw_ddb_index, relogin_timer));
3218
3219 qla4xxx_login_flash_ddb(cls_sess);
3220}
3221
3222static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3223{
3224 struct iscsi_session *sess;
3225 struct ddb_entry *ddb_entry;
3226 struct scsi_qla_host *ha;
3227
3228 sess = cls_sess->dd_data;
3229 ddb_entry = sess->dd_data;
3230 ha = ddb_entry->ha;
3231
3232 if (!(ddb_entry->ddb_type == FLASH_DDB))
3233 return;
3234
3235 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3236 !iscsi_is_session_online(cls_sess)) {
3237 DEBUG2(ql4_printk(KERN_INFO, ha,
3238 "relogin issued\n"));
3239 qla4xxx_relogin_flash_ddb(cls_sess);
3240 }
3241}
3242
f4f5df23
VC
3243void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3244{
1b46807e 3245 if (ha->dpc_thread)
f4f5df23 3246 queue_work(ha->dpc_thread, &ha->dpc_work);
f4f5df23
VC
3247}
3248
ff884430
VC
3249static struct qla4_work_evt *
3250qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3251 enum qla4_work_type type)
3252{
3253 struct qla4_work_evt *e;
3254 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3255
3256 e = kzalloc(size, GFP_ATOMIC);
3257 if (!e)
3258 return NULL;
3259
3260 INIT_LIST_HEAD(&e->list);
3261 e->type = type;
3262 return e;
3263}
3264
3265static void qla4xxx_post_work(struct scsi_qla_host *ha,
3266 struct qla4_work_evt *e)
3267{
3268 unsigned long flags;
3269
3270 spin_lock_irqsave(&ha->work_lock, flags);
3271 list_add_tail(&e->list, &ha->work_list);
3272 spin_unlock_irqrestore(&ha->work_lock, flags);
3273 qla4xxx_wake_dpc(ha);
3274}
3275
3276int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3277 enum iscsi_host_event_code aen_code,
3278 uint32_t data_size, uint8_t *data)
3279{
3280 struct qla4_work_evt *e;
3281
3282 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3283 if (!e)
3284 return QLA_ERROR;
3285
3286 e->u.aen.code = aen_code;
3287 e->u.aen.data_size = data_size;
3288 memcpy(e->u.aen.data, data, data_size);
3289
3290 qla4xxx_post_work(ha, e);
3291
3292 return QLA_SUCCESS;
3293}
3294
c0b9d3f7
VC
3295int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3296 uint32_t status, uint32_t pid,
3297 uint32_t data_size, uint8_t *data)
3298{
3299 struct qla4_work_evt *e;
3300
3301 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3302 if (!e)
3303 return QLA_ERROR;
3304
3305 e->u.ping.status = status;
3306 e->u.ping.pid = pid;
3307 e->u.ping.data_size = data_size;
3308 memcpy(e->u.ping.data, data, data_size);
3309
3310 qla4xxx_post_work(ha, e);
3311
3312 return QLA_SUCCESS;
3313}
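/*
 * The deferred-event pattern used by the two post helpers above, shown
 * with a hypothetical QLA4_EVENT_FOO (not a real event type in this
 * driver):
 *
 *	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_FOO);
 *	if (!e)
 *		return QLA_ERROR;
 *	...fill e->u.<foo>...
 *	qla4xxx_post_work(ha, e);
 *
 * qla4xxx_post_work() appends the event to ha->work_list under
 * work_lock and wakes the DPC; qla4xxx_do_work(), run from the DPC,
 * splices the list off and dispatches on e->type.
 */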
3314
a7380a65 3315static void qla4xxx_do_work(struct scsi_qla_host *ha)
ff884430
VC
3316{
3317 struct qla4_work_evt *e, *tmp;
3318 unsigned long flags;
3319 LIST_HEAD(work);
3320
3321 spin_lock_irqsave(&ha->work_lock, flags);
3322 list_splice_init(&ha->work_list, &work);
3323 spin_unlock_irqrestore(&ha->work_lock, flags);
3324
3325 list_for_each_entry_safe(e, tmp, &work, list) {
3326 list_del_init(&e->list);
3327
3328 switch (e->type) {
3329 case QLA4_EVENT_AEN:
3330 iscsi_post_host_event(ha->host_no,
3331 &qla4xxx_iscsi_transport,
3332 e->u.aen.code,
3333 e->u.aen.data_size,
3334 e->u.aen.data);
3335 break;
c0b9d3f7
VC
3336 case QLA4_EVENT_PING_STATUS:
3337 iscsi_ping_comp_event(ha->host_no,
3338 &qla4xxx_iscsi_transport,
3339 e->u.ping.status,
3340 e->u.ping.pid,
3341 e->u.ping.data_size,
3342 e->u.ping.data);
3343 break;
ff884430
VC
3344 default:
3345 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3346 "supported", e->type);
3347 }
3348 kfree(e);
3349 }
3350}
3351
afaf5a2d
DS
3352/**
3353 * qla4xxx_do_dpc - dpc routine
3354 * @data: in our case pointer to adapter structure
3355 *
3356 * This routine is a task that is scheduled by the interrupt handler
3357 * to perform the background processing for interrupts. We put it
3358 * on a task queue that is consumed whenever the scheduler runs; that's
3359 * so you can do anything (e.g. put the process to sleep, etc.). In fact,
3360 * the mid-level tries to sleep when it reaches the driver threshold
3361 * "host->can_queue". This can cause a panic if we were in our interrupt code.
3362 **/
c4028958 3363static void qla4xxx_do_dpc(struct work_struct *work)
afaf5a2d 3364{
c4028958
DH
3365 struct scsi_qla_host *ha =
3366 container_of(work, struct scsi_qla_host, dpc_work);
477ffb9d 3367 int status = QLA_ERROR;
afaf5a2d 3368
f26b9044 3369 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
f4f5df23
VC
3370 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3371 ha->host_no, __func__, ha->flags, ha->dpc_flags))
afaf5a2d
DS
3372
3373 /* Initialization not yet finished. Don't do anything yet. */
3374 if (!test_bit(AF_INIT_DONE, &ha->flags))
1b46807e 3375 return;
afaf5a2d 3376
2232be0d
LC
3377 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3378 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3379 ha->host_no, __func__, ha->flags));
1b46807e 3380 return;
2232be0d
LC
3381 }
3382
ff884430
VC
3383 /* post events to application */
3384 qla4xxx_do_work(ha);
3385
f4f5df23
VC
3386 if (is_qla8022(ha)) {
3387 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3388 qla4_8xxx_idc_lock(ha);
3389 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3390 QLA82XX_DEV_FAILED);
3391 qla4_8xxx_idc_unlock(ha);
3392 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3393 qla4_8xxx_device_state_handler(ha);
3394 }
3395 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3396 qla4_8xxx_need_qsnt_handler(ha);
3397 }
3398 }
3399
3400 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3401 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
afaf5a2d 3402 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
f4f5df23
VC
3403 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3404 if (ql4xdontresethba) {
3405 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3406 ha->host_no, __func__));
3407 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3408 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3409 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3410 goto dpc_post_reset_ha;
3411 }
3412 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3413 test_bit(DPC_RESET_HA, &ha->dpc_flags))
3414 qla4xxx_recover_adapter(ha);
afaf5a2d 3415
477ffb9d 3416 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 3417 uint8_t wait_time = RESET_INTR_TOV;
afaf5a2d 3418
afaf5a2d
DS
3419 while ((readw(&ha->reg->ctrl_status) &
3420 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3421 if (--wait_time == 0)
3422 break;
afaf5a2d 3423 msleep(1000);
afaf5a2d 3424 }
afaf5a2d
DS
3425 if (wait_time == 0)
3426 DEBUG2(printk("scsi%ld: %s: SR|FSR "
3427 "bit not cleared-- resetting\n",
3428 ha->host_no, __func__));
f4f5df23 3429 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
477ffb9d
DS
3430 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3431 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
f4f5df23 3432 status = qla4xxx_recover_adapter(ha);
477ffb9d
DS
3433 }
3434 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3435 if (status == QLA_SUCCESS)
f4f5df23 3436 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
3437 }
3438 }
3439
f4f5df23 3440dpc_post_reset_ha:
afaf5a2d
DS
3441 /* ---- process AEN? --- */
3442 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3443 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3444
3445 /* ---- Get DHCP IP Address? --- */
3446 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3447 qla4xxx_get_dhcp_ip_address(ha);
3448
13483730
MC
3449 /* ---- relogin device? --- */
3450 if (adapter_up(ha) &&
3451 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3452 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3453 }
3454
065aa1b4
VC
3455 /* ---- link change? --- */
3456 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3457 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3458 /* ---- link down? --- */
2d7924e6 3459 qla4xxx_mark_all_devices_missing(ha);
065aa1b4
VC
3460 } else {
3461 /* ---- link up? --- *
3462 * F/W will auto login to all devices ONLY ONCE after
3463 * link up during driver initialization and runtime
3464 * fatal error recovery. Therefore, the driver must
3465 * manually relogin to devices when recovering from
3466 * connection failures, logouts, expired KATO, etc. */
13483730
MC
3467 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3468 qla4xxx_build_ddb_list(ha, ha->is_reset);
3469 iscsi_host_for_each_session(ha->host,
3470 qla4xxx_login_flash_ddb);
3471 } else
3472 qla4xxx_relogin_all_devices(ha);
065aa1b4
VC
3473 }
3474 }
afaf5a2d
DS
3475}
3476
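/*
 * Note: the DPC work above is consumed by the per-host singlethread
 * workqueue created in qla4xxx_probe_adapter() ("qla4xxx_%lu_dpc",
 * INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc)).  A minimal sketch of how
 * such deferred work is typically kicked from timer/interrupt context:
 *
 *	if (ha->dpc_thread)
 *		queue_work(ha->dpc_thread, &ha->dpc_work);
 */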
3477/**
3478 * qla4xxx_free_adapter - release the adapter
3479 * @ha: pointer to adapter structure
3480 **/
3481static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3482{
8a288960 3483 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
afaf5a2d
DS
3484
3485 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
3486 /* Turn-off interrupts on the card. */
f4f5df23 3487 ha->isp_ops->disable_intrs(ha);
afaf5a2d
DS
3488 }
3489
d9e62e51
VC
3490 if (is_qla40XX(ha)) {
3491 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
3492 &ha->reg->ctrl_status);
3493 readl(&ha->reg->ctrl_status);
3494 } else if (is_qla8022(ha)) {
3495 writel(0, &ha->qla4_8xxx_reg->host_int);
3496 readl(&ha->qla4_8xxx_reg->host_int);
3497 }
3498
f4f5df23
VC
3499 /* Remove timer thread, if present */
3500 if (ha->timer_active)
3501 qla4xxx_stop_timer(ha);
3502
afaf5a2d
DS
3503 /* Kill the kernel thread for this host */
3504 if (ha->dpc_thread)
3505 destroy_workqueue(ha->dpc_thread);
3506
b3a271a9
MR
3507 /* Kill the kernel thread for this host */
3508 if (ha->task_wq)
3509 destroy_workqueue(ha->task_wq);
3510
f4f5df23
VC
3511 /* Put firmware in known state */
3512 ha->isp_ops->reset_firmware(ha);
afaf5a2d 3513
f4f5df23
VC
3514 if (is_qla8022(ha)) {
3515 qla4_8xxx_idc_lock(ha);
3516 qla4_8xxx_clear_drv_active(ha);
3517 qla4_8xxx_idc_unlock(ha);
3518 }
afaf5a2d 3519
afaf5a2d
DS
3520 /* Detach interrupts */
3521 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
f4f5df23 3522 qla4xxx_free_irqs(ha);
afaf5a2d 3523
bee4fe8e
DS
3524 /* free extra memory */
3525 qla4xxx_mem_free(ha);
f4f5df23
VC
3526}
3527
3528int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3529{
3530 int status = 0;
f4f5df23
VC
3531 unsigned long mem_base, mem_len, db_base, db_len;
3532 struct pci_dev *pdev = ha->pdev;
3533
3534 status = pci_request_regions(pdev, DRIVER_NAME);
3535 if (status) {
3536 printk(KERN_WARNING
3537 "scsi(%ld) Failed to reserve PIO regions (%s) "
3538 "status=%d\n", ha->host_no, pci_name(pdev), status);
3539 goto iospace_error_exit;
3540 }
3541
f4f5df23 3542 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
7d7311c4
SS
3543 __func__, pdev->revision));
3544 ha->revision_id = pdev->revision;
bee4fe8e 3545
f4f5df23
VC
3546 /* remap phys address */
3547 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
3548 mem_len = pci_resource_len(pdev, 0);
3549 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
3550 __func__, mem_base, mem_len));
afaf5a2d 3551
f4f5df23
VC
3552 /* mapping of pcibase pointer */
3553 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
3554 if (!ha->nx_pcibase) {
3555 printk(KERN_ERR
3556 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
3557 pci_release_regions(ha->pdev);
3558 goto iospace_error_exit;
3559 }
3560
3561 /* Mapping of IO base pointer, door bell read and write pointer */
3562
3563 /* mapping of IO base pointer */
3564 ha->qla4_8xxx_reg =
3565 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
3566 0xbc000 + (ha->pdev->devfn << 11));
3567
3568 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
3569 db_len = pci_resource_len(pdev, 4);
3570
2657c800
SS
3571 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3572 QLA82XX_CAM_RAM_DB2);
f4f5df23 3573
2657c800 3574 return 0;
f4f5df23
VC
3575iospace_error_exit:
3576 return -ENOMEM;
afaf5a2d
DS
3577}
3578
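/*
 * Note: on ISP82xx the per-function register block mapped above lives inside
 * BAR 0 at offset 0xbc000 + (devfn << 11), the doorbell region is described
 * by BAR 4, and only the CAM RAM doorbell offset (DB1 vs DB2) is selected
 * here based on the PCI function number.
 */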
3579/**
3580 * qla4xxx_iospace_config - maps registers
3581 * @ha: pointer to adapter structure
3582 *
3583 * This routine maps the HBA's registers from the pci address space
3584 * into the kernel virtual address space for memory mapped i/o.
3585 **/
f4f5df23 3586int qla4xxx_iospace_config(struct scsi_qla_host *ha)
afaf5a2d
DS
3587{
3588 unsigned long pio, pio_len, pio_flags;
3589 unsigned long mmio, mmio_len, mmio_flags;
3590
3591 pio = pci_resource_start(ha->pdev, 0);
3592 pio_len = pci_resource_len(ha->pdev, 0);
3593 pio_flags = pci_resource_flags(ha->pdev, 0);
3594 if (pio_flags & IORESOURCE_IO) {
3595 if (pio_len < MIN_IOBASE_LEN) {
c2660df3 3596 ql4_printk(KERN_WARNING, ha,
afaf5a2d
DS
3597 "Invalid PCI I/O region size\n");
3598 pio = 0;
3599 }
3600 } else {
c2660df3 3601 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
afaf5a2d
DS
3602 pio = 0;
3603 }
3604
3605 /* Use MMIO operations for all accesses. */
3606 mmio = pci_resource_start(ha->pdev, 1);
3607 mmio_len = pci_resource_len(ha->pdev, 1);
3608 mmio_flags = pci_resource_flags(ha->pdev, 1);
3609
3610 if (!(mmio_flags & IORESOURCE_MEM)) {
c2660df3
VC
3611 ql4_printk(KERN_ERR, ha,
3612 "region #0 not an MMIO resource, aborting\n");
afaf5a2d
DS
3613
3614 goto iospace_error_exit;
3615 }
c2660df3 3616
afaf5a2d 3617 if (mmio_len < MIN_IOBASE_LEN) {
c2660df3
VC
3618 ql4_printk(KERN_ERR, ha,
3619 "Invalid PCI mem region size, aborting\n");
afaf5a2d
DS
3620 goto iospace_error_exit;
3621 }
3622
3623 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
c2660df3
VC
3624 ql4_printk(KERN_WARNING, ha,
3625 "Failed to reserve PIO/MMIO regions\n");
afaf5a2d
DS
3626
3627 goto iospace_error_exit;
3628 }
3629
3630 ha->pio_address = pio;
3631 ha->pio_length = pio_len;
3632 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
3633 if (!ha->reg) {
c2660df3
VC
3634 ql4_printk(KERN_ERR, ha,
3635 "cannot remap MMIO, aborting\n");
afaf5a2d
DS
3636
3637 goto iospace_error_exit;
3638 }
3639
3640 return 0;
3641
3642iospace_error_exit:
3643 return -ENOMEM;
3644}
3645
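/*
 * Note: for ISP4xxx the I/O port region (BAR 0) is optional - an invalid or
 * missing PIO BAR only leaves ha->pio_address at 0 - while the memory BAR
 * (BAR 1) is mandatory and is what gets ioremap()'d into ha->reg above.
 */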
f4f5df23
VC
3646static struct isp_operations qla4xxx_isp_ops = {
3647 .iospace_config = qla4xxx_iospace_config,
3648 .pci_config = qla4xxx_pci_config,
3649 .disable_intrs = qla4xxx_disable_intrs,
3650 .enable_intrs = qla4xxx_enable_intrs,
3651 .start_firmware = qla4xxx_start_firmware,
3652 .intr_handler = qla4xxx_intr_handler,
3653 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
3654 .reset_chip = qla4xxx_soft_reset,
3655 .reset_firmware = qla4xxx_hw_reset,
3656 .queue_iocb = qla4xxx_queue_iocb,
3657 .complete_iocb = qla4xxx_complete_iocb,
3658 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
3659 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
3660 .get_sys_info = qla4xxx_get_sys_info,
3661};
3662
3663static struct isp_operations qla4_8xxx_isp_ops = {
3664 .iospace_config = qla4_8xxx_iospace_config,
3665 .pci_config = qla4_8xxx_pci_config,
3666 .disable_intrs = qla4_8xxx_disable_intrs,
3667 .enable_intrs = qla4_8xxx_enable_intrs,
3668 .start_firmware = qla4_8xxx_load_risc,
3669 .intr_handler = qla4_8xxx_intr_handler,
3670 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
3671 .reset_chip = qla4_8xxx_isp_reset,
3672 .reset_firmware = qla4_8xxx_stop_firmware,
3673 .queue_iocb = qla4_8xxx_queue_iocb,
3674 .complete_iocb = qla4_8xxx_complete_iocb,
3675 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
3676 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
3677 .get_sys_info = qla4_8xxx_get_sys_info,
3678};
3679
3680uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3681{
3682 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
3683}
3684
3685uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3686{
3687 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
3688}
3689
3690uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3691{
3692 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
3693}
3694
3695uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3696{
3697 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
3698}
3699
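/*
 * Note: the two isp_operations tables above keep the rest of the driver
 * chip-agnostic; callers go through the per-adapter ops pointer instead of
 * calling the qla4xxx_/qla4_8xxx_ variants directly, e.g.:
 *
 *	uint16_t req_out = ha->isp_ops->rd_shdw_req_q_out(ha);
 *	ha->isp_ops->enable_intrs(ha);
 */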
2a991c21
MR
3700static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
3701{
3702 struct scsi_qla_host *ha = data;
3703 char *str = buf;
3704 int rc;
3705
3706 switch (type) {
3707 case ISCSI_BOOT_ETH_FLAGS:
3708 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3709 break;
3710 case ISCSI_BOOT_ETH_INDEX:
3711 rc = sprintf(str, "0\n");
3712 break;
3713 case ISCSI_BOOT_ETH_MAC:
3714 rc = sysfs_format_mac(str, ha->my_mac,
3715 MAC_ADDR_LEN);
3716 break;
3717 default:
3718 rc = -ENOSYS;
3719 break;
3720 }
3721 return rc;
3722}
3723
587a1f16 3724static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2a991c21
MR
3725{
3726 int rc;
3727
3728 switch (type) {
3729 case ISCSI_BOOT_ETH_FLAGS:
3730 case ISCSI_BOOT_ETH_MAC:
3731 case ISCSI_BOOT_ETH_INDEX:
3732 rc = S_IRUGO;
3733 break;
3734 default:
3735 rc = 0;
3736 break;
3737 }
3738 return rc;
3739}
3740
3741static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
3742{
3743 struct scsi_qla_host *ha = data;
3744 char *str = buf;
3745 int rc;
3746
3747 switch (type) {
3748 case ISCSI_BOOT_INI_INITIATOR_NAME:
3749 rc = sprintf(str, "%s\n", ha->name_string);
3750 break;
3751 default:
3752 rc = -ENOSYS;
3753 break;
3754 }
3755 return rc;
3756}
3757
587a1f16 3758static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2a991c21
MR
3759{
3760 int rc;
3761
3762 switch (type) {
3763 case ISCSI_BOOT_INI_INITIATOR_NAME:
3764 rc = S_IRUGO;
3765 break;
3766 default:
3767 rc = 0;
3768 break;
3769 }
3770 return rc;
3771}
3772
3773static ssize_t
3774qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
3775 char *buf)
3776{
3777 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
3778 char *str = buf;
3779 int rc;
3780
3781 switch (type) {
3782 case ISCSI_BOOT_TGT_NAME:
3783 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
3784 break;
3785 case ISCSI_BOOT_TGT_IP_ADDR:
3786 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
3787 rc = sprintf(buf, "%pI4\n",
3788 &boot_conn->dest_ipaddr.ip_address);
3789 else
3790 rc = sprintf(str, "%pI6\n",
3791 &boot_conn->dest_ipaddr.ip_address);
3792 break;
3793 case ISCSI_BOOT_TGT_PORT:
3794 rc = sprintf(str, "%d\n", boot_conn->dest_port);
3795 break;
3796 case ISCSI_BOOT_TGT_CHAP_NAME:
3797 rc = sprintf(str, "%.*s\n",
3798 boot_conn->chap.target_chap_name_length,
3799 (char *)&boot_conn->chap.target_chap_name);
3800 break;
3801 case ISCSI_BOOT_TGT_CHAP_SECRET:
3802 rc = sprintf(str, "%.*s\n",
3803 boot_conn->chap.target_secret_length,
3804 (char *)&boot_conn->chap.target_secret);
3805 break;
3806 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3807 rc = sprintf(str, "%.*s\n",
3808 boot_conn->chap.intr_chap_name_length,
3809 (char *)&boot_conn->chap.intr_chap_name);
3810 break;
3811 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3812 rc = sprintf(str, "%.*s\n",
3813 boot_conn->chap.intr_secret_length,
3814 (char *)&boot_conn->chap.intr_secret);
3815 break;
3816 case ISCSI_BOOT_TGT_FLAGS:
3817 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3818 break;
3819 case ISCSI_BOOT_TGT_NIC_ASSOC:
3820 rc = sprintf(str, "0\n");
3821 break;
3822 default:
3823 rc = -ENOSYS;
3824 break;
3825 }
3826 return rc;
3827}
3828
3829static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
3830{
3831 struct scsi_qla_host *ha = data;
3832 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
3833
3834 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3835}
3836
3837static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
3838{
3839 struct scsi_qla_host *ha = data;
3840 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
3841
3842 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3843}
3844
587a1f16 3845static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2a991c21
MR
3846{
3847 int rc;
3848
3849 switch (type) {
3850 case ISCSI_BOOT_TGT_NAME:
3851 case ISCSI_BOOT_TGT_IP_ADDR:
3852 case ISCSI_BOOT_TGT_PORT:
3853 case ISCSI_BOOT_TGT_CHAP_NAME:
3854 case ISCSI_BOOT_TGT_CHAP_SECRET:
3855 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3856 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3857 case ISCSI_BOOT_TGT_NIC_ASSOC:
3858 case ISCSI_BOOT_TGT_FLAGS:
3859 rc = S_IRUGO;
3860 break;
3861 default:
3862 rc = 0;
3863 break;
3864 }
3865 return rc;
3866}
3867
3868static void qla4xxx_boot_release(void *data)
3869{
3870 struct scsi_qla_host *ha = data;
3871
3872 scsi_host_put(ha->host);
3873}
3874
3875static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3876{
3877 dma_addr_t buf_dma;
3878 uint32_t addr, pri_addr, sec_addr;
3879 uint32_t offset;
3880 uint16_t func_num;
3881 uint8_t val;
3882 uint8_t *buf = NULL;
3883 size_t size = 13 * sizeof(uint8_t);
3884 int ret = QLA_SUCCESS;
3885
3886 func_num = PCI_FUNC(ha->pdev->devfn);
3887
0d5b36b8
MR
3888 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
3889 __func__, ha->pdev->device, func_num);
2a991c21 3890
0d5b36b8 3891 if (is_qla40XX(ha)) {
2a991c21
MR
3892 if (func_num == 1) {
3893 addr = NVRAM_PORT0_BOOT_MODE;
3894 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
3895 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
3896 } else if (func_num == 3) {
3897 addr = NVRAM_PORT1_BOOT_MODE;
3898 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
3899 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
3900 } else {
3901 ret = QLA_ERROR;
3902 goto exit_boot_info;
3903 }
3904
3905 /* Check Boot Mode */
3906 val = rd_nvram_byte(ha, addr);
3907 if (!(val & 0x07)) {
e8fb00e0
MR
3908 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
3909 "options : 0x%x\n", __func__, val));
2a991c21
MR
3910 ret = QLA_ERROR;
3911 goto exit_boot_info;
3912 }
3913
3914 /* get primary valid target index */
3915 val = rd_nvram_byte(ha, pri_addr);
3916 if (val & BIT_7)
3917 ddb_index[0] = (val & 0x7f);
2a991c21
MR
3918
3919 /* get secondary valid target index */
3920 val = rd_nvram_byte(ha, sec_addr);
3921 if (val & BIT_7)
3922 ddb_index[1] = (val & 0x7f);
2a991c21
MR
3923
3924 } else if (is_qla8022(ha)) {
3925 buf = dma_alloc_coherent(&ha->pdev->dev, size,
3926 &buf_dma, GFP_KERNEL);
3927 if (!buf) {
3928 DEBUG2(ql4_printk(KERN_ERR, ha,
3929 "%s: Unable to allocate dma buffer\n",
3930 __func__));
3931 ret = QLA_ERROR;
3932 goto exit_boot_info;
3933 }
3934
3935 if (ha->port_num == 0)
3936 offset = BOOT_PARAM_OFFSET_PORT0;
3937 else if (ha->port_num == 1)
3938 offset = BOOT_PARAM_OFFSET_PORT1;
3939 else {
3940 ret = QLA_ERROR;
3941 goto exit_boot_info_free;
3942 }
3943 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
3944 offset;
3945 if (qla4xxx_get_flash(ha, buf_dma, addr,
3946 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
3947 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
0bd7f842 3948 " failed\n", ha->host_no, __func__));
2a991c21
MR
3949 ret = QLA_ERROR;
3950 goto exit_boot_info_free;
3951 }
3952 /* Check Boot Mode */
3953 if (!(buf[1] & 0x07)) {
e8fb00e0
MR
3954 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
3955 " : 0x%x\n", buf[1]));
2a991c21
MR
3956 ret = QLA_ERROR;
3957 goto exit_boot_info_free;
3958 }
3959
3960 /* get primary valid target index */
3961 if (buf[2] & BIT_7)
3962 ddb_index[0] = buf[2] & 0x7f;
2a991c21
MR
3963
3964 /* get secondary valid target index */
3965 if (buf[11] & BIT_7)
3966 ddb_index[1] = buf[11] & 0x7f;
2a991c21
MR
3967 } else {
3968 ret = QLA_ERROR;
3969 goto exit_boot_info;
3970 }
3971
3972 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
3973 " target ID %d\n", __func__, ddb_index[0],
3974 ddb_index[1]));
3975
3976exit_boot_info_free:
3977 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
3978exit_boot_info:
20e835b4
LC
3979 ha->pri_ddb_idx = ddb_index[0];
3980 ha->sec_ddb_idx = ddb_index[1];
2a991c21
MR
3981 return ret;
3982}
3983
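/*
 * Note on the layout read by get_fw_boot_info(): boot is considered enabled
 * when any of the low three bits (0x07) of the boot-mode byte is set, and
 * each primary/secondary target byte carries a valid flag in BIT_7 with the
 * DDB index in the low seven bits.  For example, a byte of 0x85 describes a
 * valid boot target at DDB index 5.
 */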
28deb45c
LC
3984/**
3985 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
3986 * @ha: pointer to adapter structure
3987 * @username: CHAP username to be returned
3988 * @password: CHAP password to be returned
3989 *
3990 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
3991 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
 3992 * So, from the CHAP cache, find the first BIDI CHAP entry and copy it
 3993 * into the boot record exported through sysfs.
3994 **/
3995static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
3996 char *password)
3997{
3998 int i, ret = -EINVAL;
3999 int max_chap_entries = 0;
4000 struct ql4_chap_table *chap_table;
4001
4002 if (is_qla8022(ha))
4003 max_chap_entries = (ha->hw.flt_chap_size / 2) /
4004 sizeof(struct ql4_chap_table);
4005 else
4006 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4007
4008 if (!ha->chap_list) {
4009 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
4010 return ret;
4011 }
4012
4013 mutex_lock(&ha->chap_sem);
4014 for (i = 0; i < max_chap_entries; i++) {
4015 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4016 if (chap_table->cookie !=
4017 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4018 continue;
4019 }
4020
4021 if (chap_table->flags & BIT_7) /* local */
4022 continue;
4023
4024 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4025 continue;
4026
4027 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4028 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4029 ret = 0;
4030 break;
4031 }
4032 mutex_unlock(&ha->chap_sem);
4033
4034 return ret;
4035}
4036
4037
2a991c21
MR
4038static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4039 struct ql4_boot_session_info *boot_sess,
4040 uint16_t ddb_index)
4041{
4042 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4043 struct dev_db_entry *fw_ddb_entry;
4044 dma_addr_t fw_ddb_entry_dma;
4045 uint16_t idx;
4046 uint16_t options;
4047 int ret = QLA_SUCCESS;
4048
4049 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4050 &fw_ddb_entry_dma, GFP_KERNEL);
4051 if (!fw_ddb_entry) {
4052 DEBUG2(ql4_printk(KERN_ERR, ha,
4053 "%s: Unable to allocate dma buffer.\n",
4054 __func__));
4055 ret = QLA_ERROR;
4056 return ret;
4057 }
4058
4059 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4060 fw_ddb_entry_dma, ddb_index)) {
e8fb00e0
MR
4061 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4062 "index [%d]\n", __func__, ddb_index));
2a991c21
MR
4063 ret = QLA_ERROR;
4064 goto exit_boot_target;
4065 }
4066
4067 /* Update target name and IP from DDB */
4068 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4069 min(sizeof(boot_sess->target_name),
4070 sizeof(fw_ddb_entry->iscsi_name)));
4071
4072 options = le16_to_cpu(fw_ddb_entry->options);
4073 if (options & DDB_OPT_IPV6_DEVICE) {
4074 memcpy(&boot_conn->dest_ipaddr.ip_address,
4075 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4076 } else {
4077 boot_conn->dest_ipaddr.ip_type = 0x1;
4078 memcpy(&boot_conn->dest_ipaddr.ip_address,
4079 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4080 }
4081
4082 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4083
4084 /* update chap information */
4085 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4086
4087 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4088
4089 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4090
4091 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4092 target_chap_name,
4093 (char *)&boot_conn->chap.target_secret,
4094 idx);
4095 if (ret) {
4096 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4097 ret = QLA_ERROR;
4098 goto exit_boot_target;
4099 }
4100
4101 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4102 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4103 }
4104
4105 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4106
4107 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4108
28deb45c
LC
4109 ret = qla4xxx_get_bidi_chap(ha,
4110 (char *)&boot_conn->chap.intr_chap_name,
4111 (char *)&boot_conn->chap.intr_secret);
4112
2a991c21
MR
4113 if (ret) {
4114 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4115 ret = QLA_ERROR;
4116 goto exit_boot_target;
4117 }
4118
4119 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4120 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4121 }
4122
4123exit_boot_target:
4124 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4125 fw_ddb_entry, fw_ddb_entry_dma);
4126 return ret;
4127}
4128
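/*
 * Note: in the DDB iscsi_options word used above, BIT_7 gates fetching the
 * target CHAP name/secret for the boot record, and BIT_4 additionally pulls
 * in the first bidirectional CHAP entry from the cached CHAP table.
 */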
4129static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4130{
4131 uint16_t ddb_index[2];
8de5b958
LC
4132 int ret = QLA_ERROR;
4133 int rval;
2a991c21
MR
4134
4135 memset(ddb_index, 0, sizeof(ddb_index));
8de5b958
LC
4136 ddb_index[0] = 0xffff;
4137 ddb_index[1] = 0xffff;
2a991c21
MR
4138 ret = get_fw_boot_info(ha, ddb_index);
4139 if (ret != QLA_SUCCESS) {
e8fb00e0
MR
4140 DEBUG2(ql4_printk(KERN_INFO, ha,
4141 "%s: No boot target configured.\n", __func__));
2a991c21
MR
4142 return ret;
4143 }
4144
13483730
MC
4145 if (ql4xdisablesysfsboot)
4146 return QLA_SUCCESS;
4147
8de5b958
LC
4148 if (ddb_index[0] == 0xffff)
4149 goto sec_target;
4150
4151 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
2a991c21 4152 ddb_index[0]);
8de5b958 4153 if (rval != QLA_SUCCESS) {
e8fb00e0
MR
4154 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4155 "configured\n", __func__));
8de5b958
LC
4156 } else
4157 ret = QLA_SUCCESS;
2a991c21 4158
8de5b958
LC
4159sec_target:
4160 if (ddb_index[1] == 0xffff)
4161 goto exit_get_boot_info;
4162
4163 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
2a991c21 4164 ddb_index[1]);
8de5b958 4165 if (rval != QLA_SUCCESS) {
e8fb00e0
MR
4166 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4167 " configured\n", __func__));
8de5b958
LC
4168 } else
4169 ret = QLA_SUCCESS;
4170
4171exit_get_boot_info:
2a991c21
MR
4172 return ret;
4173}
4174
4175static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4176{
4177 struct iscsi_boot_kobj *boot_kobj;
4178
4179 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
13483730
MC
4180 return QLA_ERROR;
4181
4182 if (ql4xdisablesysfsboot) {
4183 ql4_printk(KERN_INFO, ha,
0bd7f842 4184 "%s: sysfsboot disabled - driver will trigger login "
13483730
MC
4185 "and publish session for discovery .\n", __func__);
4186 return QLA_SUCCESS;
4187 }
4188
2a991c21
MR
4189
4190 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4191 if (!ha->boot_kset)
4192 goto kset_free;
4193
4194 if (!scsi_host_get(ha->host))
4195 goto kset_free;
4196 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4197 qla4xxx_show_boot_tgt_pri_info,
4198 qla4xxx_tgt_get_attr_visibility,
4199 qla4xxx_boot_release);
4200 if (!boot_kobj)
4201 goto put_host;
4202
4203 if (!scsi_host_get(ha->host))
4204 goto kset_free;
4205 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4206 qla4xxx_show_boot_tgt_sec_info,
4207 qla4xxx_tgt_get_attr_visibility,
4208 qla4xxx_boot_release);
4209 if (!boot_kobj)
4210 goto put_host;
4211
4212 if (!scsi_host_get(ha->host))
4213 goto kset_free;
4214 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4215 qla4xxx_show_boot_ini_info,
4216 qla4xxx_ini_get_attr_visibility,
4217 qla4xxx_boot_release);
4218 if (!boot_kobj)
4219 goto put_host;
4220
4221 if (!scsi_host_get(ha->host))
4222 goto kset_free;
4223 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4224 qla4xxx_show_boot_eth_info,
4225 qla4xxx_eth_get_attr_visibility,
4226 qla4xxx_boot_release);
4227 if (!boot_kobj)
4228 goto put_host;
4229
13483730 4230 return QLA_SUCCESS;
2a991c21
MR
4231
4232put_host:
4233 scsi_host_put(ha->host);
4234kset_free:
4235 iscsi_boot_destroy_kset(ha->boot_kset);
4236 return -ENOMEM;
4237}
4238
4549415a
LC
4239
4240/**
 4241 * qla4xxx_create_chap_list - Create CHAP list from FLASH
4242 * @ha: pointer to adapter structure
4243 *
 4244 * Read flash and build a list of CHAP entries. During login, when a CHAP
 4245 * entry is received it is checked against this list. If the entry exists,
 4246 * its CHAP entry index is set in the DDB. If the CHAP entry does not exist
 4247 * in this list, a new entry is added to the CHAP table in FLASH and the
 4248 * index obtained is used in the DDB.
4249 **/
4250static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4251{
4252 int rval = 0;
4253 uint8_t *chap_flash_data = NULL;
4254 uint32_t offset;
4255 dma_addr_t chap_dma;
4256 uint32_t chap_size = 0;
4257
4258 if (is_qla40XX(ha))
4259 chap_size = MAX_CHAP_ENTRIES_40XX *
4260 sizeof(struct ql4_chap_table);
 4261 else /* Single region contains CHAP info for both
 4262 * ports and is divided in half, one half per port.
4263 */
4264 chap_size = ha->hw.flt_chap_size / 2;
4265
4266 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4267 &chap_dma, GFP_KERNEL);
4268 if (!chap_flash_data) {
4269 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4270 return;
4271 }
4272 if (is_qla40XX(ha))
4273 offset = FLASH_CHAP_OFFSET;
4274 else {
4275 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4276 if (ha->port_num == 1)
4277 offset += chap_size;
4278 }
4279
4280 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4281 if (rval != QLA_SUCCESS)
4282 goto exit_chap_list;
4283
4284 if (ha->chap_list == NULL)
4285 ha->chap_list = vmalloc(chap_size);
4286 if (ha->chap_list == NULL) {
4287 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4288 goto exit_chap_list;
4289 }
4290
4291 memcpy(ha->chap_list, chap_flash_data, chap_size);
4292
4293exit_chap_list:
4294 dma_free_coherent(&ha->pdev->dev, chap_size,
4295 chap_flash_data, chap_dma);
4549415a
LC
4296}
4297
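/*
 * Note: on ISP82xx the flash CHAP region is shared by both ports and split
 * in half, which is why chap_size is flt_chap_size / 2 above and why port 1
 * starts reading at an extra chap_size offset.
 */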
13483730
MC
4298static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4299 struct ql4_tuple_ddb *tddb)
4300{
4301 struct scsi_qla_host *ha;
4302 struct iscsi_cls_session *cls_sess;
4303 struct iscsi_cls_conn *cls_conn;
4304 struct iscsi_session *sess;
4305 struct iscsi_conn *conn;
4306
4307 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4308 ha = ddb_entry->ha;
4309 cls_sess = ddb_entry->sess;
4310 sess = cls_sess->dd_data;
4311 cls_conn = ddb_entry->conn;
4312 conn = cls_conn->dd_data;
4313
4314 tddb->tpgt = sess->tpgt;
4315 tddb->port = conn->persistent_port;
4316 strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4317 strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4318}
4319
4320static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
1cb78d73
VC
4321 struct ql4_tuple_ddb *tddb,
4322 uint8_t *flash_isid)
13483730
MC
4323{
4324 uint16_t options = 0;
4325
4326 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4327 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4328 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4329
4330 options = le16_to_cpu(fw_ddb_entry->options);
4331 if (options & DDB_OPT_IPV6_DEVICE)
4332 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4333 else
4334 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4335
4336 tddb->port = le16_to_cpu(fw_ddb_entry->port);
1cb78d73
VC
4337
4338 if (flash_isid == NULL)
4339 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4340 sizeof(tddb->isid));
4341 else
4342 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
13483730
MC
4343}
4344
4345static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4346 struct ql4_tuple_ddb *old_tddb,
173269ef
MR
4347 struct ql4_tuple_ddb *new_tddb,
4348 uint8_t is_isid_compare)
13483730
MC
4349{
4350 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4351 return QLA_ERROR;
4352
4353 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4354 return QLA_ERROR;
4355
4356 if (old_tddb->port != new_tddb->port)
4357 return QLA_ERROR;
4358
173269ef
MR
 4359 /* For multi-session targets the driver generates the ISID, so do not
 4360 * compare ISIDs in the reset path since that would be a comparison
 4361 * between a driver-generated ISID and a firmware-generated ISID.  This
 4362 * could lead to duplicate DDBs being added to the list, as the
 4363 * driver-generated ISID would not match the firmware-generated ISID.
4364 */
4365 if (is_isid_compare) {
4366 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4367 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4368 __func__, old_tddb->isid[5], old_tddb->isid[4],
4369 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4370 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4371 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4372 new_tddb->isid[0]));
4373
4374 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4375 sizeof(old_tddb->isid)))
4376 return QLA_ERROR;
4377 }
4378
13483730
MC
4379 DEBUG2(ql4_printk(KERN_INFO, ha,
4380 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4381 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4382 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4383 new_tddb->ip_addr, new_tddb->iscsi_name));
4384
4385 return QLA_SUCCESS;
4386}
4387
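/*
 * Note: two DDBs are treated as the same target above when the IQN, the
 * textual IP address and the port all match; the ISID is compared only when
 * is_isid_compare is set (the flash DDB scan at init time), not on the
 * reset/session-exists path.
 */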
4388static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4389 struct dev_db_entry *fw_ddb_entry)
4390{
4391 struct ddb_entry *ddb_entry;
4392 struct ql4_tuple_ddb *fw_tddb = NULL;
4393 struct ql4_tuple_ddb *tmp_tddb = NULL;
4394 int idx;
4395 int ret = QLA_ERROR;
4396
4397 fw_tddb = vzalloc(sizeof(*fw_tddb));
4398 if (!fw_tddb) {
4399 DEBUG2(ql4_printk(KERN_WARNING, ha,
4400 "Memory Allocation failed.\n"));
4401 ret = QLA_SUCCESS;
4402 goto exit_check;
4403 }
4404
4405 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4406 if (!tmp_tddb) {
4407 DEBUG2(ql4_printk(KERN_WARNING, ha,
4408 "Memory Allocation failed.\n"));
4409 ret = QLA_SUCCESS;
4410 goto exit_check;
4411 }
4412
1cb78d73 4413 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
13483730
MC
4414
4415 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4416 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4417 if (ddb_entry == NULL)
4418 continue;
4419
4420 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
173269ef 4421 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
13483730
MC
4422 ret = QLA_SUCCESS; /* found */
4423 goto exit_check;
4424 }
4425 }
4426
4427exit_check:
4428 if (fw_tddb)
4429 vfree(fw_tddb);
4430 if (tmp_tddb)
4431 vfree(tmp_tddb);
4432 return ret;
4433}
4434
1cb78d73
VC
4435/**
 4436 * qla4xxx_check_existing_isid - check if a target with the same ISID
 4437 * exists in the target list
 4438 * @list_nt: list of targets
 4439 * @isid: ISID to check
 4440 *
 4441 * This routine returns QLA_SUCCESS if a target with the same ISID exists
4442 **/
4443static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
4444{
4445 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4446 struct dev_db_entry *fw_ddb_entry;
4447
4448 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4449 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
4450
4451 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
4452 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
4453 return QLA_SUCCESS;
4454 }
4455 }
4456 return QLA_ERROR;
4457}
4458
4459/**
 4460 * qla4xxx_update_isid - compare DDBs and update the ISID
 4461 * @ha: Pointer to host adapter structure.
 4462 * @list_nt: list of NT targets
 4463 * @fw_ddb_entry: firmware DDB entry
 4464 *
 4465 * This routine updates the ISID if DDBs have the same IQN, the same
 4466 * ISID and different IP addresses.
 4467 * Returns QLA_SUCCESS if the ISID is updated.
4468 **/
4469static int qla4xxx_update_isid(struct scsi_qla_host *ha,
4470 struct list_head *list_nt,
4471 struct dev_db_entry *fw_ddb_entry)
4472{
4473 uint8_t base_value, i;
4474
4475 base_value = fw_ddb_entry->isid[1] & 0x1f;
4476 for (i = 0; i < 8; i++) {
4477 fw_ddb_entry->isid[1] = (base_value | (i << 5));
4478 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4479 break;
4480 }
4481
4482 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4483 return QLA_ERROR;
4484
4485 return QLA_SUCCESS;
4486}
4487
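/*
 * Note: qla4xxx_update_isid() keeps the low five bits of isid[1] and cycles
 * the top three bits through 0-7 until it finds a value not already present
 * in list_nt.  For example, starting from isid[1] == 0x0a it would try 0x0a,
 * 0x2a, 0x4a, ... up to 0xea.
 */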
4488/**
 4489 * qla4xxx_should_update_isid - check if the ISID needs to be updated
 4490 * @ha: Pointer to host adapter structure.
 4491 * @old_tddb: ddb tuple
 4492 * @new_tddb: ddb tuple
 4493 *
 4494 * Returns QLA_SUCCESS for a different IP or a different port with the
 4495 * same IQN and the same ISID
4496 **/
4497static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
4498 struct ql4_tuple_ddb *old_tddb,
4499 struct ql4_tuple_ddb *new_tddb)
4500{
4501 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
4502 /* Same ip */
4503 if (old_tddb->port == new_tddb->port)
4504 return QLA_ERROR;
4505 }
4506
4507 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4508 /* different iqn */
4509 return QLA_ERROR;
4510
4511 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4512 sizeof(old_tddb->isid)))
4513 /* different isid */
4514 return QLA_ERROR;
4515
4516 return QLA_SUCCESS;
4517}
4518
4519/**
4520 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
4521 * @ha: Pointer to host adapter structure.
 4522 * @list_nt: list of NT targets.
 4523 * @fw_ddb_entry: firmware ddb entry.
 4524 *
 4525 * This routine checks whether fw_ddb_entry already exists in list_nt, to
 4526 * avoid adding a duplicate ddb to list_nt.
 4527 * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
 4528 * Note: This function also updates the ISID of the DDB if required.
4529 **/
4530
13483730
MC
4531static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4532 struct list_head *list_nt,
4533 struct dev_db_entry *fw_ddb_entry)
4534{
4535 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4536 struct ql4_tuple_ddb *fw_tddb = NULL;
4537 struct ql4_tuple_ddb *tmp_tddb = NULL;
1cb78d73 4538 int rval, ret = QLA_ERROR;
13483730
MC
4539
4540 fw_tddb = vzalloc(sizeof(*fw_tddb));
4541 if (!fw_tddb) {
4542 DEBUG2(ql4_printk(KERN_WARNING, ha,
4543 "Memory Allocation failed.\n"));
4544 ret = QLA_SUCCESS;
4545 goto exit_check;
4546 }
4547
4548 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4549 if (!tmp_tddb) {
4550 DEBUG2(ql4_printk(KERN_WARNING, ha,
4551 "Memory Allocation failed.\n"));
4552 ret = QLA_SUCCESS;
4553 goto exit_check;
4554 }
4555
1cb78d73 4556 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
13483730
MC
4557
4558 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
1cb78d73
VC
4559 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
4560 nt_ddb_idx->flash_isid);
4561 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
4562 /* found duplicate ddb */
4563 if (ret == QLA_SUCCESS)
4564 goto exit_check;
4565 }
4566
4567 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4568 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
4569
4570 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
4571 if (ret == QLA_SUCCESS) {
4572 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
4573 if (rval == QLA_SUCCESS)
4574 ret = QLA_ERROR;
4575 else
4576 ret = QLA_SUCCESS;
4577
13483730
MC
4578 goto exit_check;
4579 }
4580 }
4581
4582exit_check:
4583 if (fw_tddb)
4584 vfree(fw_tddb);
4585 if (tmp_tddb)
4586 vfree(tmp_tddb);
4587 return ret;
4588}
4589
4a4bc2e9 4590static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
13483730 4591{
4a4bc2e9 4592 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
13483730 4593
4a4bc2e9
LC
4594 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4595 list_del_init(&ddb_idx->list);
4596 vfree(ddb_idx);
13483730 4597 }
13483730
MC
4598}
4599
4600static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
4601 struct dev_db_entry *fw_ddb_entry)
4602{
4603 struct iscsi_endpoint *ep;
4604 struct sockaddr_in *addr;
4605 struct sockaddr_in6 *addr6;
4606 struct sockaddr *dst_addr;
4607 char *ip;
4608
 4609 /* TODO: the iscsi_endpoint needs to be destroyed on unload */
4610 dst_addr = vmalloc(sizeof(*dst_addr));
4611 if (!dst_addr)
4612 return NULL;
4613
4614 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
4615 dst_addr->sa_family = AF_INET6;
4616 addr6 = (struct sockaddr_in6 *)dst_addr;
4617 ip = (char *)&addr6->sin6_addr;
4618 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
4619 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
4620
4621 } else {
4622 dst_addr->sa_family = AF_INET;
4623 addr = (struct sockaddr_in *)dst_addr;
4624 ip = (char *)&addr->sin_addr;
4625 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
4626 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
4627 }
4628
4629 ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
4630 vfree(dst_addr);
4631 return ep;
4632}
4633
4634static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
4635{
4636 if (ql4xdisablesysfsboot)
4637 return QLA_SUCCESS;
4638 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
4639 return QLA_ERROR;
4640 return QLA_SUCCESS;
4641}
4642
4643static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4644 struct ddb_entry *ddb_entry)
4645{
c28eaaca
NJ
4646 uint16_t def_timeout;
4647
13483730
MC
4648 ddb_entry->ddb_type = FLASH_DDB;
4649 ddb_entry->fw_ddb_index = INVALID_ENTRY;
4650 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
4651 ddb_entry->ha = ha;
4652 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
4653 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
4654
4655 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
4656 atomic_set(&ddb_entry->relogin_timer, 0);
4657 atomic_set(&ddb_entry->relogin_retry_count, 0);
c28eaaca 4658 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
13483730 4659 ddb_entry->default_relogin_timeout =
c28eaaca
NJ
4660 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
4661 def_timeout : LOGIN_TOV;
13483730
MC
4662 ddb_entry->default_time2wait =
4663 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
4664}
4665
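/*
 * Note: the relogin timeout set above only honors the flash DDB's
 * def_timeout when it lies inside (LOGIN_TOV, LOGIN_TOV * 10); a zero or
 * out-of-range value falls back to LOGIN_TOV.
 */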
4666static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
4667{
4668 uint32_t idx = 0;
4669 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
4670 uint32_t sts[MBOX_REG_COUNT];
4671 uint32_t ip_state;
4672 unsigned long wtime;
4673 int ret;
4674
4675 wtime = jiffies + (HZ * IP_CONFIG_TOV);
4676 do {
4677 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
4678 if (ip_idx[idx] == -1)
4679 continue;
4680
4681 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
4682
4683 if (ret == QLA_ERROR) {
4684 ip_idx[idx] = -1;
4685 continue;
4686 }
4687
4688 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
4689
4690 DEBUG2(ql4_printk(KERN_INFO, ha,
4691 "Waiting for IP state for idx = %d, state = 0x%x\n",
4692 ip_idx[idx], ip_state));
4693 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
4694 ip_state == IP_ADDRSTATE_INVALID ||
4695 ip_state == IP_ADDRSTATE_PREFERRED ||
4696 ip_state == IP_ADDRSTATE_DEPRICATED ||
4697 ip_state == IP_ADDRSTATE_DISABLING)
4698 ip_idx[idx] = -1;
13483730
MC
4699 }
4700
4701 /* Break if all IP states checked */
4702 if ((ip_idx[0] == -1) &&
4703 (ip_idx[1] == -1) &&
4704 (ip_idx[2] == -1) &&
4705 (ip_idx[3] == -1))
4706 break;
4707 schedule_timeout_uninterruptible(HZ);
4708 } while (time_after(wtime, jiffies));
4709}
4710
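/*
 * Note: the loop above polls each interface index via qla4xxx_get_ip_state()
 * for up to IP_CONFIG_TOV seconds, sleeping one second between passes and
 * dropping an index from the poll set (marking it -1) once its address state
 * settles or the mailbox query fails.
 */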
4a4bc2e9
LC
4711static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4712 struct list_head *list_st)
13483730 4713{
4a4bc2e9 4714 struct qla_ddb_index *st_ddb_idx;
13483730 4715 int max_ddbs;
4a4bc2e9
LC
4716 int fw_idx_size;
4717 struct dev_db_entry *fw_ddb_entry;
4718 dma_addr_t fw_ddb_dma;
13483730
MC
4719 int ret;
4720 uint32_t idx = 0, next_idx = 0;
4721 uint32_t state = 0, conn_err = 0;
4a4bc2e9 4722 uint16_t conn_id = 0;
13483730
MC
4723
4724 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4725 &fw_ddb_dma);
4726 if (fw_ddb_entry == NULL) {
4727 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4a4bc2e9 4728 goto exit_st_list;
13483730
MC
4729 }
4730
4a4bc2e9
LC
4731 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4732 MAX_DEV_DB_ENTRIES;
13483730
MC
4733 fw_idx_size = sizeof(struct qla_ddb_index);
4734
4735 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4a4bc2e9
LC
4736 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4737 NULL, &next_idx, &state,
4738 &conn_err, NULL, &conn_id);
13483730
MC
4739 if (ret == QLA_ERROR)
4740 break;
4741
981c982c
LC
4742 /* Ignore DDB if invalid state (unassigned) */
4743 if (state == DDB_DS_UNASSIGNED)
4744 goto continue_next_st;
4745
13483730
MC
4746 /* Check if ST, add to the list_st */
4747 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4748 goto continue_next_st;
4749
4750 st_ddb_idx = vzalloc(fw_idx_size);
4751 if (!st_ddb_idx)
4752 break;
4753
4754 st_ddb_idx->fw_ddb_idx = idx;
4755
4a4bc2e9 4756 list_add_tail(&st_ddb_idx->list, list_st);
13483730
MC
4757continue_next_st:
4758 if (next_idx == 0)
4759 break;
4760 }
4761
4a4bc2e9
LC
4762exit_st_list:
4763 if (fw_ddb_entry)
4764 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4765}
4766
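/*
 * Note: send-target (ST) entries are recognized above purely by an empty
 * iscsi_name in the firmware DDB; entries with a non-empty name are normal
 * targets (NT) and are handled later by qla4xxx_build_nt_list().
 */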
4767/**
4768 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4769 * @ha: pointer to adapter structure
4770 * @list_ddb: List from which failed ddb to be removed
4771 *
4772 * Iterate over the list of DDBs and find and remove DDBs that are either in
4773 * no connection active state or failed state
4774 **/
4775static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4776 struct list_head *list_ddb)
4777{
4778 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
4779 uint32_t next_idx = 0;
4780 uint32_t state = 0, conn_err = 0;
4781 int ret;
4782
4783 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4784 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4785 NULL, 0, NULL, &next_idx, &state,
4786 &conn_err, NULL, NULL);
4787 if (ret == QLA_ERROR)
4788 continue;
4789
4790 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4791 state == DDB_DS_SESSION_FAILED) {
4792 list_del_init(&ddb_idx->list);
4793 vfree(ddb_idx);
4794 }
4795 }
4796}
4797
4798static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4799 struct dev_db_entry *fw_ddb_entry,
4800 int is_reset)
4801{
4802 struct iscsi_cls_session *cls_sess;
4803 struct iscsi_session *sess;
4804 struct iscsi_cls_conn *cls_conn;
4805 struct iscsi_endpoint *ep;
4806 uint16_t cmds_max = 32;
4807 uint16_t conn_id = 0;
4808 uint32_t initial_cmdsn = 0;
4809 int ret = QLA_SUCCESS;
4810
4811 struct ddb_entry *ddb_entry = NULL;
4812
 4813 /* Create the session object with INVALID_ENTRY;
 4814 * the target_id will get set when we issue the login
13483730 4815 */
4a4bc2e9
LC
4816 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4817 cmds_max, sizeof(struct ddb_entry),
4818 sizeof(struct ql4_task_data),
4819 initial_cmdsn, INVALID_ENTRY);
4820 if (!cls_sess) {
4821 ret = QLA_ERROR;
4822 goto exit_setup;
4823 }
13483730 4824
4a4bc2e9
LC
4825 /*
 4826 * iscsi_session_setup() takes a reference on the transport module
 4827 * owner, so call module_put() to decrement that reference count.
4828 **/
4829 module_put(qla4xxx_iscsi_transport.owner);
4830 sess = cls_sess->dd_data;
4831 ddb_entry = sess->dd_data;
4832 ddb_entry->sess = cls_sess;
4833
4834 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4835 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4836 sizeof(struct dev_db_entry));
4837
4838 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4839
4840 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4841
4842 if (!cls_conn) {
4843 ret = QLA_ERROR;
4844 goto exit_setup;
13483730
MC
4845 }
4846
4a4bc2e9 4847 ddb_entry->conn = cls_conn;
13483730 4848
4a4bc2e9
LC
4849 /* Setup ep, for displaying attributes in sysfs */
4850 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4851 if (ep) {
4852 ep->conn = cls_conn;
4853 cls_conn->ep = ep;
4854 } else {
4855 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4856 ret = QLA_ERROR;
4857 goto exit_setup;
4858 }
13483730 4859
4a4bc2e9
LC
4860 /* Update sess/conn params */
4861 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
13483730 4862
4a4bc2e9
LC
4863 if (is_reset == RESET_ADAPTER) {
4864 iscsi_block_session(cls_sess);
4865 /* Use the relogin path to discover new devices
4866 * by short-circuting the logic of setting
4867 * timer to relogin - instead set the flags
4868 * to initiate login right away.
4869 */
4870 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4871 set_bit(DF_RELOGIN, &ddb_entry->flags);
4872 }
4873
4874exit_setup:
4875 return ret;
4876}
4877
4878static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4879 struct list_head *list_nt, int is_reset)
4880{
4881 struct dev_db_entry *fw_ddb_entry;
4882 dma_addr_t fw_ddb_dma;
4883 int max_ddbs;
4884 int fw_idx_size;
4885 int ret;
4886 uint32_t idx = 0, next_idx = 0;
4887 uint32_t state = 0, conn_err = 0;
4888 uint16_t conn_id = 0;
4889 struct qla_ddb_index *nt_ddb_idx;
4890
4891 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4892 &fw_ddb_dma);
4893 if (fw_ddb_entry == NULL) {
4894 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4895 goto exit_nt_list;
13483730 4896 }
4a4bc2e9
LC
4897 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4898 MAX_DEV_DB_ENTRIES;
4899 fw_idx_size = sizeof(struct qla_ddb_index);
13483730
MC
4900
4901 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4a4bc2e9
LC
4902 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4903 NULL, &next_idx, &state,
4904 &conn_err, NULL, &conn_id);
13483730
MC
4905 if (ret == QLA_ERROR)
4906 break;
4907
4908 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4909 goto continue_next_nt;
4910
 4911 /* Check if NT, then add it to the list */
4912 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4913 goto continue_next_nt;
4914
4a4bc2e9
LC
4915 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4916 state == DDB_DS_SESSION_FAILED))
4917 goto continue_next_nt;
13483730 4918
4a4bc2e9
LC
4919 DEBUG2(ql4_printk(KERN_INFO, ha,
4920 "Adding DDB to session = 0x%x\n", idx));
4921 if (is_reset == INIT_ADAPTER) {
4922 nt_ddb_idx = vmalloc(fw_idx_size);
4923 if (!nt_ddb_idx)
4924 break;
13483730 4925
4a4bc2e9 4926 nt_ddb_idx->fw_ddb_idx = idx;
13483730 4927
1cb78d73
VC
 4928 /* Copy the original isid as it may get updated in
 4929 * qla4xxx_update_isid(). We need the original isid in
 4930 * qla4xxx_compare_tuple_ddb() to find a duplicate
 4931 * target */
4932 memcpy(&nt_ddb_idx->flash_isid[0],
4933 &fw_ddb_entry->isid[0],
4934 sizeof(nt_ddb_idx->flash_isid));
4935
4936 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
4937 fw_ddb_entry);
4938 if (ret == QLA_SUCCESS) {
4939 /* free nt_ddb_idx and do not add to list_nt */
4a4bc2e9
LC
4940 vfree(nt_ddb_idx);
4941 goto continue_next_nt;
13483730 4942 }
1cb78d73
VC
4943
4944 /* Copy updated isid */
4945 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4946 sizeof(struct dev_db_entry));
4947
4a4bc2e9
LC
4948 list_add_tail(&nt_ddb_idx->list, list_nt);
4949 } else if (is_reset == RESET_ADAPTER) {
4950 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4951 QLA_SUCCESS)
4952 goto continue_next_nt;
13483730 4953 }
4a4bc2e9
LC
4954
4955 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4956 if (ret == QLA_ERROR)
4957 goto exit_nt_list;
4958
13483730
MC
4959continue_next_nt:
4960 if (next_idx == 0)
4961 break;
4962 }
4a4bc2e9
LC
4963
4964exit_nt_list:
13483730
MC
4965 if (fw_ddb_entry)
4966 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4a4bc2e9
LC
4967}
4968
4969/**
4970 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4971 * @ha: pointer to adapter structure
4972 * @is_reset: Is this init path or reset path
4973 *
 4974 * Create a list of sendtargets (ST) from firmware DDBs, issue the send
 4975 * targets using connection open, then create the list of normal targets
 4976 * (NT) from firmware DDBs. Based on the list of NT targets, set up
 4977 * session and connection objects.
4978 **/
4979void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4980{
4981 uint16_t tmo = 0;
4982 struct list_head list_st, list_nt;
4983 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
4984 unsigned long wtime;
4985
4986 if (!test_bit(AF_LINK_UP, &ha->flags)) {
4987 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
4988 ha->is_reset = is_reset;
4989 return;
4990 }
4991
4992 INIT_LIST_HEAD(&list_st);
4993 INIT_LIST_HEAD(&list_nt);
4994
4995 qla4xxx_build_st_list(ha, &list_st);
4996
 4997 /* Before issuing the conn open mbox, ensure all IP states are configured.
 4998 * Note, conn open fails if IPs are not configured.
4999 */
5000 qla4xxx_wait_for_ip_configuration(ha);
5001
5002 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
5003 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
5004 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
5005 }
5006
 5007 /* Wait for all sendtargets to complete; wait a minimum of 12 seconds */
c28eaaca
NJ
5008 tmo = ((ha->def_timeout > LOGIN_TOV) &&
5009 (ha->def_timeout < LOGIN_TOV * 10) ?
5010 ha->def_timeout : LOGIN_TOV);
5011
4a4bc2e9
LC
5012 DEBUG2(ql4_printk(KERN_INFO, ha,
5013 "Default time to wait for build ddb %d\n", tmo));
5014
5015 wtime = jiffies + (HZ * tmo);
5016 do {
f1f2e60e
NJ
5017 if (list_empty(&list_st))
5018 break;
5019
4a4bc2e9
LC
5020 qla4xxx_remove_failed_ddb(ha, &list_st);
5021 schedule_timeout_uninterruptible(HZ / 10);
5022 } while (time_after(wtime, jiffies));
5023
5024 /* Free up the sendtargets list */
5025 qla4xxx_free_ddb_list(&list_st);
5026
5027 qla4xxx_build_nt_list(ha, &list_nt, is_reset);
5028
5029 qla4xxx_free_ddb_list(&list_nt);
13483730
MC
5030
5031 qla4xxx_free_ddb_index(ha);
5032}
5033
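/*
 * Note: qla4xxx_build_ddb_list() is reached from two spots in this file:
 * from qla4xxx_probe_adapter() with INIT_ADAPTER, and from the DPC link-up
 * path with the saved ha->is_reset when the link was still down at the time
 * of the original request (AF_BUILD_DDB_LIST).
 */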
afaf5a2d
DS
5034/**
5035 * qla4xxx_probe_adapter - callback function to probe HBA
5036 * @pdev: pointer to pci_dev structure
 5037 * @ent: pointer to the pci_device_id entry
5038 *
5039 * This routine will probe for Qlogic 4xxx iSCSI host adapters.
5040 * It returns zero if successful. It also initializes all data necessary for
5041 * the driver.
5042 **/
5043static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5044 const struct pci_device_id *ent)
5045{
5046 int ret = -ENODEV, status;
5047 struct Scsi_Host *host;
5048 struct scsi_qla_host *ha;
afaf5a2d
DS
5049 uint8_t init_retry_count = 0;
5050 char buf[34];
f4f5df23 5051 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
f9880e76 5052 uint32_t dev_state;
afaf5a2d
DS
5053
5054 if (pci_enable_device(pdev))
5055 return -1;
5056
b3a271a9 5057 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
afaf5a2d
DS
5058 if (host == NULL) {
5059 printk(KERN_WARNING
5060 "qla4xxx: Couldn't allocate host from scsi layer!\n");
5061 goto probe_disable_device;
5062 }
5063
5064 /* Clear our data area */
b3a271a9 5065 ha = to_qla_host(host);
afaf5a2d
DS
5066 memset(ha, 0, sizeof(*ha));
5067
5068 /* Save the information from PCI BIOS. */
5069 ha->pdev = pdev;
5070 ha->host = host;
5071 ha->host_no = host->host_no;
5072
2232be0d
LC
5073 pci_enable_pcie_error_reporting(pdev);
5074
f4f5df23
VC
5075 /* Setup Runtime configurable options */
5076 if (is_qla8022(ha)) {
5077 ha->isp_ops = &qla4_8xxx_isp_ops;
5078 rwlock_init(&ha->hw_lock);
5079 ha->qdr_sn_window = -1;
5080 ha->ddr_mn_window = -1;
5081 ha->curr_window = 255;
5082 ha->func_num = PCI_FUNC(ha->pdev->devfn);
5083 nx_legacy_intr = &legacy_intr[ha->func_num];
5084 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
5085 ha->nx_legacy_intr.tgt_status_reg =
5086 nx_legacy_intr->tgt_status_reg;
5087 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
5088 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
5089 } else {
5090 ha->isp_ops = &qla4xxx_isp_ops;
5091 }
5092
2232be0d
LC
5093 /* Set EEH reset type to fundamental if required by hba */
5094 if (is_qla8022(ha))
5095 pdev->needs_freset = 1;
5096
afaf5a2d 5097 /* Configure PCI I/O space. */
f4f5df23 5098 ret = ha->isp_ops->iospace_config(ha);
afaf5a2d 5099 if (ret)
f4f5df23 5100 goto probe_failed_ioconfig;
afaf5a2d 5101
c2660df3 5102 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
afaf5a2d
DS
5103 pdev->device, pdev->irq, ha->reg);
5104
5105 qla4xxx_config_dma_addressing(ha);
5106
5107 /* Initialize lists and spinlocks. */
afaf5a2d
DS
5108 INIT_LIST_HEAD(&ha->free_srb_q);
5109
5110 mutex_init(&ha->mbox_sem);
4549415a 5111 mutex_init(&ha->chap_sem);
f4f5df23 5112 init_completion(&ha->mbx_intr_comp);
95d31262 5113 init_completion(&ha->disable_acb_comp);
afaf5a2d
DS
5114
5115 spin_lock_init(&ha->hardware_lock);
8e9157c8 5116 spin_lock_init(&ha->work_lock);
afaf5a2d 5117
ff884430
VC
5118 /* Initialize work list */
5119 INIT_LIST_HEAD(&ha->work_list);
5120
afaf5a2d
DS
5121 /* Allocate dma buffers */
5122 if (qla4xxx_mem_alloc(ha)) {
c2660df3
VC
5123 ql4_printk(KERN_WARNING, ha,
5124 "[ERROR] Failed to allocate memory for adapter\n");
afaf5a2d
DS
5125
5126 ret = -ENOMEM;
5127 goto probe_failed;
5128 }
5129
b3a271a9
MR
5130 host->cmd_per_lun = 3;
5131 host->max_channel = 0;
5132 host->max_lun = MAX_LUNS - 1;
5133 host->max_id = MAX_TARGETS;
5134 host->max_cmd_len = IOCB_MAX_CDB_LEN;
5135 host->can_queue = MAX_SRBS ;
5136 host->transportt = qla4xxx_scsi_transport;
5137
5138 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
5139 if (ret) {
5140 ql4_printk(KERN_WARNING, ha,
5141 "%s: scsi_init_shared_tag_map failed\n", __func__);
5142 goto probe_failed;
5143 }
5144
5145 pci_set_drvdata(pdev, ha);
5146
5147 ret = scsi_add_host(host, &pdev->dev);
5148 if (ret)
5149 goto probe_failed;
5150
f4f5df23
VC
5151 if (is_qla8022(ha))
5152 (void) qla4_8xxx_get_flash_info(ha);
5153
afaf5a2d
DS
5154 /*
5155 * Initialize the Host adapter request/response queues and
5156 * firmware
5157 * NOTE: interrupts enabled upon successful completion
5158 */
13483730 5159 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
f4f5df23
VC
5160 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
5161 init_retry_count++ < MAX_INIT_RETRIES) {
f9880e76
PM
5162
5163 if (is_qla8022(ha)) {
5164 qla4_8xxx_idc_lock(ha);
5165 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
5166 qla4_8xxx_idc_unlock(ha);
5167 if (dev_state == QLA82XX_DEV_FAILED) {
5168 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
5169 "initialize adapter. H/W is in failed state\n",
5170 __func__);
5171 break;
5172 }
5173 }
afaf5a2d
DS
5174 DEBUG2(printk("scsi: %s: retrying adapter initialization "
5175 "(%d)\n", __func__, init_retry_count));
f4f5df23
VC
5176
5177 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
5178 continue;
5179
13483730 5180 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
afaf5a2d 5181 }
f4f5df23
VC
5182
5183 if (!test_bit(AF_ONLINE, &ha->flags)) {
c2660df3 5184 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
afaf5a2d 5185
fe998527
LC
5186 if (is_qla8022(ha) && ql4xdontresethba) {
5187 /* Put the device in failed state. */
5188 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
5189 qla4_8xxx_idc_lock(ha);
5190 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5191 QLA82XX_DEV_FAILED);
5192 qla4_8xxx_idc_unlock(ha);
5193 }
afaf5a2d 5194 ret = -ENODEV;
b3a271a9 5195 goto remove_host;
afaf5a2d
DS
5196 }
5197
afaf5a2d
DS
5198 /* Startup the kernel thread for this host adapter. */
5199 DEBUG2(printk("scsi: %s: Starting kernel thread for "
5200 "qla4xxx_dpc\n", __func__));
5201 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
5202 ha->dpc_thread = create_singlethread_workqueue(buf);
5203 if (!ha->dpc_thread) {
c2660df3 5204 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
afaf5a2d 5205 ret = -ENODEV;
b3a271a9 5206 goto remove_host;
afaf5a2d 5207 }
c4028958 5208 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
afaf5a2d 5209
b3a271a9
MR
5210 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
5211 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
5212 if (!ha->task_wq) {
5213 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
5214 ret = -ENODEV;
5215 goto remove_host;
5216 }
5217
f4f5df23
VC
5218 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
5219 * (which is called indirectly by qla4xxx_initialize_adapter),
5220 * so that irqs will be registered after crbinit but before
5221 * mbx_intr_enable.
5222 */
5223 if (!is_qla8022(ha)) {
5224 ret = qla4xxx_request_irqs(ha);
5225 if (ret) {
5226 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
5227 "interrupt %d already in use.\n", pdev->irq);
b3a271a9 5228 goto remove_host;
f4f5df23 5229 }
afaf5a2d 5230 }
afaf5a2d 5231
2232be0d 5232 pci_save_state(ha->pdev);
f4f5df23 5233 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
5234
5235 /* Start timer thread. */
5236 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
5237
5238 set_bit(AF_INIT_DONE, &ha->flags);
5239
068237c8
TP
5240 qla4_8xxx_alloc_sysfs_attr(ha);
5241
afaf5a2d
DS
5242 printk(KERN_INFO
5243 " QLogic iSCSI HBA Driver version: %s\n"
5244 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
5245 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
5246 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
5247 ha->patch_number, ha->build_number);
ed1086e0 5248
2a991c21 5249 if (qla4xxx_setup_boot_info(ha))
3573bfb2
VC
5250 ql4_printk(KERN_ERR, ha,
5251 "%s: No iSCSI boot target configured\n", __func__);
2a991c21 5252
13483730
MC
5253 /* Build the ddb list and log in to each entry */
5254 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
5255 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
5256
5257 qla4xxx_create_chap_list(ha);
5258
ed1086e0 5259 qla4xxx_create_ifaces(ha);
afaf5a2d
DS
5260 return 0;
5261
b3a271a9
MR
5262remove_host:
5263 scsi_remove_host(ha->host);
5264
afaf5a2d
DS
5265probe_failed:
5266 qla4xxx_free_adapter(ha);
f4f5df23
VC
5267
5268probe_failed_ioconfig:
2232be0d 5269 pci_disable_pcie_error_reporting(pdev);
afaf5a2d
DS
5270 scsi_host_put(ha->host);
5271
5272probe_disable_device:
5273 pci_disable_device(pdev);
5274
5275 return ret;
5276}
5277
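The failure labels above (remove_host, probe_failed, probe_failed_ioconfig, probe_disable_device) follow the usual Linux goto-unwind pattern: each label releases only what was acquired before the point of failure, in reverse order. A minimal sketch of the same pattern, assuming hypothetical foo_alloc_resources()/foo_start_hw() helpers rather than this driver's real steps:

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = foo_alloc_resources(pdev);	/* hypothetical allocation step */
	if (ret)
		goto disable_device;

	ret = foo_start_hw(pdev);		/* hypothetical init step */
	if (ret)
		goto free_resources;

	return 0;

free_resources:
	foo_free_resources(pdev);		/* undo in reverse order */
disable_device:
	pci_disable_device(pdev);
	return ret;
}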
7eece5a0
KH
5278/**
5279 * qla4xxx_prevent_other_port_reinit - prevent the other port from re-initializing
5280 * @ha: pointer to adapter structure
5281 *
5282 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
5283 * so that the other port will not re-initialize while in the process of
5284 * removing the ha due to driver unload or hba hotplug.
5285 **/
5286static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
5287{
5288 struct scsi_qla_host *other_ha = NULL;
5289 struct pci_dev *other_pdev = NULL;
5290 int fn = ISP4XXX_PCI_FN_2;
5291
5292 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
5293 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
5294 fn = ISP4XXX_PCI_FN_1;
5295
5296 other_pdev =
5297 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5298 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5299 fn));
5300
5301 /* Get other_ha if other_pdev is valid and its state is enabled */
5302 if (other_pdev) {
5303 if (atomic_read(&other_pdev->enable_cnt)) {
5304 other_ha = pci_get_drvdata(other_pdev);
5305 if (other_ha) {
5306 set_bit(AF_HA_REMOVAL, &other_ha->flags);
5307 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
5308 "Prevent %s reinit\n", __func__,
5309 dev_name(&other_ha->pdev->dev)));
5310 }
5311 }
5312 pci_dev_put(other_pdev);
5313 }
5314}
5315
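qla4xxx_prevent_other_port_reinit() above derives the sibling iSCSI function number (1 or 3) from its own devfn and looks the device up by domain/bus/slot. A hedged sketch of just that lookup, independent of any driver state; the caller owns the returned reference:

/* Illustrative only: return the PCI device at function 'fn' in the same
 * domain/bus/slot as 'pdev'.  The caller must pci_dev_put() the result.
 */
static struct pci_dev *foo_get_sibling_fn(struct pci_dev *pdev, unsigned int fn)
{
	return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					   pdev->bus->number,
					   PCI_DEVFN(PCI_SLOT(pdev->devfn), fn));
}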
13483730
MC
5316static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
5317{
5318 struct ddb_entry *ddb_entry;
5319 int options;
5320 int idx;
5321
5322 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
5323
5324 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
5325 if ((ddb_entry != NULL) &&
5326 (ddb_entry->ddb_type == FLASH_DDB)) {
5327
5328 options = LOGOUT_OPTION_CLOSE_SESSION;
5329 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
5330 == QLA_ERROR)
5331 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
5332 __func__);
5333
5334 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
5335 /*
5336 * The driver's module reference count was decremented when
5337 * this session was set up so that driver unload is seamless
5338 * without actually destroying the session; take that
5339 * reference back before tearing the session down.
5340 */
5341 try_module_get(qla4xxx_iscsi_transport.owner);
5342 iscsi_destroy_endpoint(ddb_entry->conn->ep);
5343 qla4xxx_free_ddb(ha, ddb_entry);
5344 iscsi_session_teardown(ddb_entry->sess);
5345 }
5346 }
5347}
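The try_module_get() in the teardown above balances a module_put() done when the flash DDB session was created, so the module reference count is level again before the session is destroyed. A minimal sketch of that balancing, with hypothetical foo_* helpers that are not part of this driver:

static void foo_session_create(void)
{
	/* drop our own reference so rmmod is not blocked by the session */
	module_put(THIS_MODULE);
}

static void foo_session_destroy(void)
{
	/* take the reference back just before the session goes away */
	if (!try_module_get(THIS_MODULE))
		pr_warn("foo: module is already on its way out\n");
}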
afaf5a2d
DS
5348/**
5349 * qla4xxx_remove_adapter - callback function to remove adapter.
5350 * @pdev: PCI device pointer
5351 **/
5352static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5353{
5354 struct scsi_qla_host *ha;
5355
5356 ha = pci_get_drvdata(pdev);
5357
7eece5a0
KH
5358 if (!is_qla8022(ha))
5359 qla4xxx_prevent_other_port_reinit(ha);
bee4fe8e 5360
ed1086e0
VC
5361 /* destroy iface from sysfs */
5362 qla4xxx_destroy_ifaces(ha);
5363
13483730 5364 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
2a991c21
MR
5365 iscsi_boot_destroy_kset(ha->boot_kset);
5366
13483730 5367 qla4xxx_destroy_fw_ddb_session(ha);
068237c8 5368 qla4_8xxx_free_sysfs_attr(ha);
13483730 5369
afaf5a2d
DS
5370 scsi_remove_host(ha->host);
5371
5372 qla4xxx_free_adapter(ha);
5373
5374 scsi_host_put(ha->host);
5375
2232be0d 5376 pci_disable_pcie_error_reporting(pdev);
f4f5df23 5377 pci_disable_device(pdev);
afaf5a2d
DS
5378 pci_set_drvdata(pdev, NULL);
5379}
5380
5381/**
5382 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
5383 * @ha: HA context
5384 *
5385 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
5386 * supported addressing method.
5387 */
47975477 5388static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
afaf5a2d
DS
5389{
5390 int retval;
5391
5392 /* Update our PCI device dma_mask for full 64 bit mask */
6a35528a
YH
5393 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
5394 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
afaf5a2d
DS
5395 dev_dbg(&ha->pdev->dev,
5396 "Failed to set 64 bit PCI consistent mask; "
5397 "using 32 bit.\n");
5398 retval = pci_set_consistent_dma_mask(ha->pdev,
284901a9 5399 DMA_BIT_MASK(32));
afaf5a2d
DS
5400 }
5401 } else
284901a9 5402 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
afaf5a2d
DS
5403}
5404
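qla4xxx_config_dma_addressing() above prefers a 64-bit streaming and coherent DMA mask and falls back to 32-bit when either request is refused. The same fallback as a self-contained sketch (not the driver's function) that reports which width was accepted:

static int foo_config_dma(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 64;			/* full 64-bit addressing */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 32;			/* 32-bit fallback */

	return -EIO;				/* no usable DMA mask */
}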
5405static int qla4xxx_slave_alloc(struct scsi_device *sdev)
5406{
b3a271a9
MR
5407 struct iscsi_cls_session *cls_sess;
5408 struct iscsi_session *sess;
5409 struct ddb_entry *ddb;
8bb4033d 5410 int queue_depth = QL4_DEF_QDEPTH;
afaf5a2d 5411
b3a271a9
MR
5412 cls_sess = starget_to_session(sdev->sdev_target);
5413 sess = cls_sess->dd_data;
5414 ddb = sess->dd_data;
5415
afaf5a2d
DS
5416 sdev->hostdata = ddb;
5417 sdev->tagged_supported = 1;
8bb4033d
VC
5418
5419 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
5420 queue_depth = ql4xmaxqdepth;
5421
5422 scsi_activate_tcq(sdev, queue_depth);
afaf5a2d
DS
5423 return 0;
5424}
5425
5426static int qla4xxx_slave_configure(struct scsi_device *sdev)
5427{
5428 sdev->tagged_supported = 1;
5429 return 0;
5430}
5431
5432static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5433{
5434 scsi_deactivate_tcq(sdev, 1);
5435}
5436
f7b4aa63
TP
5437static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
5438 int reason)
5439{
5440 if (!ql4xqfulltracking)
5441 return -EOPNOTSUPP;
5442
5443 return iscsi_change_queue_depth(sdev, qdepth, reason);
5444}
5445
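The slave_alloc/change_queue_depth callbacks above clamp the ql4xmaxqdepth module parameter and hand queue-full tracking to libiscsi when ql4xqfulltracking is enabled. A hedged sketch of the clamping half, using a hypothetical foo_maxqdepth parameter in place of the real one:

static int foo_maxqdepth = 32;			/* hypothetical module parameter */

static int foo_slave_alloc(struct scsi_device *sdev)
{
	int qdepth = 32;			/* default depth */

	if (foo_maxqdepth > 0 && foo_maxqdepth <= 0xffff)
		qdepth = foo_maxqdepth;

	sdev->tagged_supported = 1;
	scsi_activate_tcq(sdev, qdepth);	/* enable TCQ at that depth */
	return 0;
}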
afaf5a2d
DS
5446/**
5447 * qla4xxx_del_from_active_array - returns an active srb
5448 * @ha: Pointer to host adapter structure.
fd589a8f 5449 * @index: index into the active_array
afaf5a2d
DS
5450 *
5451 * This routine removes and returns the srb at the specified index
5452 **/
f4f5df23
VC
5453struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
5454 uint32_t index)
afaf5a2d
DS
5455{
5456 struct srb *srb = NULL;
5369887a 5457 struct scsi_cmnd *cmd = NULL;
afaf5a2d 5458
5369887a
VC
5459 cmd = scsi_host_find_tag(ha->host, index);
5460 if (!cmd)
afaf5a2d
DS
5461 return srb;
5462
5369887a
VC
5463 srb = (struct srb *)CMD_SP(cmd);
5464 if (!srb)
afaf5a2d
DS
5465 return srb;
5466
5467 /* update counters */
5468 if (srb->flags & SRB_DMA_VALID) {
5469 ha->req_q_count += srb->iocb_cnt;
5470 ha->iocb_cnt -= srb->iocb_cnt;
5471 if (srb->cmd)
5369887a
VC
5472 srb->cmd->host_scribble =
5473 (unsigned char *)(unsigned long) MAX_SRBS;
afaf5a2d
DS
5474 }
5475 return srb;
5476}
5477
afaf5a2d
DS
5478/**
5479 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
09a0f719 5480 * @ha: Pointer to host adapter structure.
afaf5a2d
DS
5481 * @cmd: Scsi Command to wait on.
5482 *
5483 * This routine waits for the command to be returned by the firmware
5484 * for up to a maximum wait time.
5485 **/
5486static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
5487 struct scsi_cmnd *cmd)
5488{
5489 int done = 0;
5490 struct srb *rp;
5491 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
2232be0d
LC
5492 int ret = SUCCESS;
5493
5494 /* Don't wait on command if PCI error is being handled
5495 * by PCI AER driver
5496 */
5497 if (unlikely(pci_channel_offline(ha->pdev)) ||
5498 (test_bit(AF_EEH_BUSY, &ha->flags))) {
5499 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
5500 ha->host_no, __func__);
5501 return ret;
5502 }
afaf5a2d
DS
5503
5504 do {
5505 /* Check to see if it has been returned to the OS */
5369887a 5506 rp = (struct srb *) CMD_SP(cmd);
afaf5a2d
DS
5507 if (rp == NULL) {
5508 done++;
5509 break;
5510 }
5511
5512 msleep(2000);
5513 } while (max_wait_time--);
5514
5515 return done;
5516}
5517
5518/**
5519 * qla4xxx_wait_for_hba_online - waits for HBA to come online
5520 * @ha: Pointer to host adapter structure
5521 **/
5522static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
5523{
5524 unsigned long wait_online;
5525
f581a3f7 5526 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
afaf5a2d
DS
5527 while (time_before(jiffies, wait_online)) {
5528
5529 if (adapter_up(ha))
5530 return QLA_SUCCESS;
afaf5a2d
DS
5531
5532 msleep(2000);
5533 }
5534
5535 return QLA_ERROR;
5536}
5537
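qla4xxx_wait_for_hba_online() above is a plain jiffies-based polling loop: compute a deadline, test the condition, sleep, and use time_before() so jiffies wrap-around is handled. A generic sketch of that shape, with a hypothetical foo_condition() predicate:

static int foo_wait_for_condition(unsigned int timeout_secs)
{
	unsigned long deadline = jiffies + timeout_secs * HZ;

	while (time_before(jiffies, deadline)) {
		if (foo_condition())		/* hypothetical predicate */
			return 0;
		msleep(2000);			/* poll every two seconds */
	}
	return -ETIMEDOUT;
}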
5538/**
ce545039 5539 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
fd589a8f 5540 * @ha: pointer to HBA
afaf5a2d
DS
5541 * @stgt: pointer to the SCSI target
5542 * @sdev: pointer to the SCSI device (NULL means all devices on the target)
5543 *
5544 * This function waits for all outstanding commands to a lun to complete. It
5545 * returns 0 if all pending commands are returned and 1 otherwise.
5546 **/
ce545039
MC
5547static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
5548 struct scsi_target *stgt,
5549 struct scsi_device *sdev)
afaf5a2d
DS
5550{
5551 int cnt;
5552 int status = 0;
5553 struct scsi_cmnd *cmd;
5554
5555 /*
ce545039
MC
5556 * Waiting for all commands for the designated target or dev
5557 * in the active array
afaf5a2d
DS
5558 */
5559 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
5560 cmd = scsi_host_find_tag(ha->host, cnt);
ce545039
MC
5561 if (cmd && stgt == scsi_target(cmd->device) &&
5562 (!sdev || sdev == cmd->device)) {
afaf5a2d
DS
5563 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5564 status++;
5565 break;
5566 }
5567 }
5568 }
5569 return status;
5570}
5571
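qla4xxx_eh_wait_for_commands() above walks every possible tag up to can_queue with scsi_host_find_tag() and matches the command's target/device. A sketch under the same assumptions that just counts commands still outstanding on one target:

static int foo_count_outstanding(struct Scsi_Host *shost,
				 struct scsi_target *stgt)
{
	struct scsi_cmnd *cmd;
	int cnt, busy = 0;

	for (cnt = 0; cnt < shost->can_queue; cnt++) {
		cmd = scsi_host_find_tag(shost, cnt);	/* NULL if tag unused */
		if (cmd && scsi_target(cmd->device) == stgt)
			busy++;
	}
	return busy;
}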
09a0f719
VC
5572/**
5573 * qla4xxx_eh_abort - callback for abort task.
5574 * @cmd: Pointer to Linux's SCSI command structure
5575 *
5576 * This routine is called by the Linux OS to abort the specified
5577 * command.
5578 **/
5579static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
5580{
5581 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5582 unsigned int id = cmd->device->id;
5583 unsigned int lun = cmd->device->lun;
92b3e5bb 5584 unsigned long flags;
09a0f719
VC
5585 struct srb *srb = NULL;
5586 int ret = SUCCESS;
5587 int wait = 0;
5588
c2660df3 5589 ql4_printk(KERN_INFO, ha,
5cd049a5
CH
5590 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
5591 ha->host_no, id, lun, cmd);
09a0f719 5592
92b3e5bb 5593 spin_lock_irqsave(&ha->hardware_lock, flags);
09a0f719 5594 srb = (struct srb *) CMD_SP(cmd);
92b3e5bb
MC
5595 if (!srb) {
5596 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719 5597 return SUCCESS;
92b3e5bb 5598 }
09a0f719 5599 kref_get(&srb->srb_ref);
92b3e5bb 5600 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719
VC
5601
5602 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
5603 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
5604 ha->host_no, id, lun));
5605 ret = FAILED;
5606 } else {
5607 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
5608 ha->host_no, id, lun));
5609 wait = 1;
5610 }
5611
5612 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
5613
5614 /* Wait for command to complete */
5615 if (wait) {
5616 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5617 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
5618 ha->host_no, id, lun));
5619 ret = FAILED;
5620 }
5621 }
5622
c2660df3 5623 ql4_printk(KERN_INFO, ha,
09a0f719 5624 "scsi%ld:%d:%d: Abort command - %s\n",
25985edc 5625 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
09a0f719
VC
5626
5627 return ret;
5628}
5629
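qla4xxx_eh_abort() above takes a kref on the srb under the hardware lock so the normal completion path cannot free it while the abort mailbox command is outstanding, then drops the reference with kref_put(). A generic sketch of that pin-then-release pattern, with a hypothetical foo_obj type:

struct foo_obj {
	struct kref ref;
};

static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo_obj, ref));
}

static void foo_use_object(struct foo_obj *obj, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	kref_get(&obj->ref);			/* object cannot vanish now */
	spin_unlock_irqrestore(lock, flags);

	/* ... sleeping work, e.g. a mailbox command ... */

	kref_put(&obj->ref, foo_release);	/* drop our reference */
}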
afaf5a2d
DS
5630/**
5631 * qla4xxx_eh_device_reset - callback for target reset.
5632 * @cmd: Pointer to Linux's SCSI command structure
5633 *
5634 * This routine is called by the Linux OS to reset all luns on the
5635 * specified target.
5636 **/
5637static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
5638{
5639 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5640 struct ddb_entry *ddb_entry = cmd->device->hostdata;
afaf5a2d
DS
5641 int ret = FAILED, stat;
5642
612f7348 5643 if (!ddb_entry)
afaf5a2d
DS
5644 return ret;
5645
c01be6dc
MC
5646 ret = iscsi_block_scsi_eh(cmd);
5647 if (ret)
5648 return ret;
5649 ret = FAILED;
5650
c2660df3 5651 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
5652 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
5653 cmd->device->channel, cmd->device->id, cmd->device->lun);
5654
5655 DEBUG2(printk(KERN_INFO
5656 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
5657 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
242f9dcb 5658 cmd, jiffies, cmd->request->timeout / HZ,
afaf5a2d
DS
5659 ha->dpc_flags, cmd->result, cmd->allowed));
5660
5661 /* FIXME: wait for hba to go online */
5662 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
5663 if (stat != QLA_SUCCESS) {
c2660df3 5664 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
afaf5a2d
DS
5665 goto eh_dev_reset_done;
5666 }
5667
ce545039
MC
5668 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5669 cmd->device)) {
c2660df3 5670 ql4_printk(KERN_INFO, ha,
ce545039
MC
5671 "DEVICE RESET FAILED - waiting for "
5672 "commands.\n");
5673 goto eh_dev_reset_done;
afaf5a2d
DS
5674 }
5675
9d562913
DS
5676 /* Send marker. */
5677 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5678 MM_LUN_RESET) != QLA_SUCCESS)
5679 goto eh_dev_reset_done;
5680
c2660df3 5681 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
5682 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
5683 ha->host_no, cmd->device->channel, cmd->device->id,
5684 cmd->device->lun);
5685
5686 ret = SUCCESS;
5687
5688eh_dev_reset_done:
5689
5690 return ret;
5691}
5692
ce545039
MC
5693/**
5694 * qla4xxx_eh_target_reset - callback for target reset.
5695 * @cmd: Pointer to Linux's SCSI command structure
5696 *
5697 * This routine is called by the Linux OS to reset the target.
5698 **/
5699static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
5700{
5701 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5702 struct ddb_entry *ddb_entry = cmd->device->hostdata;
c01be6dc 5703 int stat, ret;
ce545039
MC
5704
5705 if (!ddb_entry)
5706 return FAILED;
5707
c01be6dc
MC
5708 ret = iscsi_block_scsi_eh(cmd);
5709 if (ret)
5710 return ret;
5711
ce545039
MC
5712 starget_printk(KERN_INFO, scsi_target(cmd->device),
5713 "WARM TARGET RESET ISSUED.\n");
5714
5715 DEBUG2(printk(KERN_INFO
5716 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
5717 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
242f9dcb 5718 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ce545039
MC
5719 ha->dpc_flags, cmd->result, cmd->allowed));
5720
5721 stat = qla4xxx_reset_target(ha, ddb_entry);
5722 if (stat != QLA_SUCCESS) {
5723 starget_printk(KERN_INFO, scsi_target(cmd->device),
5724 "WARM TARGET RESET FAILED.\n");
5725 return FAILED;
5726 }
5727
ce545039
MC
5728 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5729 NULL)) {
5730 starget_printk(KERN_INFO, scsi_target(cmd->device),
5731 "WARM TARGET DEVICE RESET FAILED - "
5732 "waiting for commands.\n");
5733 return FAILED;
5734 }
5735
9d562913
DS
5736 /* Send marker. */
5737 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5738 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
5739 starget_printk(KERN_INFO, scsi_target(cmd->device),
5740 "WARM TARGET DEVICE RESET FAILED - "
5741 "marker iocb failed.\n");
5742 return FAILED;
5743 }
5744
ce545039
MC
5745 starget_printk(KERN_INFO, scsi_target(cmd->device),
5746 "WARM TARGET RESET SUCCEEDED.\n");
5747 return SUCCESS;
5748}
5749
8a288960
SR
5750/**
5751 * qla4xxx_is_eh_active - check if error handler is running
5752 * @shost: Pointer to SCSI Host struct
5753 *
5754 * This routine determines whether the host reset was invoked from the
5755 * EH path or from an application such as sg_reset.
5756 **/
5757static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
5758{
5759 if (shost->shost_state == SHOST_RECOVERY)
5760 return 1;
5761 return 0;
5762}
5763
afaf5a2d
DS
5764/**
5765 * qla4xxx_eh_host_reset - kernel callback
5766 * @cmd: Pointer to Linux's SCSI command structure
5767 *
5768 * This routine is invoked by the Linux kernel to perform fatal error
5769 * recovery on the specified adapter.
5770 **/
5771static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5772{
5773 int return_status = FAILED;
5774 struct scsi_qla_host *ha;
5775
b3a271a9 5776 ha = to_qla_host(cmd->device->host);
afaf5a2d 5777
f4f5df23
VC
5778 if (ql4xdontresethba) {
5779 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5780 ha->host_no, __func__));
8a288960
SR
5781
5782 /* Clear outstanding srb in queues */
5783 if (qla4xxx_is_eh_active(cmd->device->host))
5784 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
5785
f4f5df23
VC
5786 return FAILED;
5787 }
5788
c2660df3 5789 ql4_printk(KERN_INFO, ha,
dca05c4c 5790 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
afaf5a2d
DS
5791 cmd->device->channel, cmd->device->id, cmd->device->lun);
5792
5793 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
5794 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
5795 "DEAD.\n", ha->host_no, cmd->device->channel,
5796 __func__));
5797
5798 return FAILED;
5799 }
5800
f4f5df23
VC
5801 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5802 if (is_qla8022(ha))
5803 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5804 else
5805 set_bit(DPC_RESET_HA, &ha->dpc_flags);
5806 }
50a29aec 5807
f4f5df23 5808 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
afaf5a2d 5809 return_status = SUCCESS;
afaf5a2d 5810
c2660df3 5811 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
25985edc 5812 return_status == FAILED ? "FAILED" : "SUCCEEDED");
afaf5a2d
DS
5813
5814 return return_status;
5815}
5816
95d31262
VC
5817static int qla4xxx_context_reset(struct scsi_qla_host *ha)
5818{
5819 uint32_t mbox_cmd[MBOX_REG_COUNT];
5820 uint32_t mbox_sts[MBOX_REG_COUNT];
5821 struct addr_ctrl_blk_def *acb = NULL;
5822 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
5823 int rval = QLA_SUCCESS;
5824 dma_addr_t acb_dma;
5825
5826 acb = dma_alloc_coherent(&ha->pdev->dev,
5827 sizeof(struct addr_ctrl_blk_def),
5828 &acb_dma, GFP_KERNEL);
5829 if (!acb) {
5830 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
5831 __func__);
5832 rval = -ENOMEM;
5833 goto exit_port_reset;
5834 }
5835
5836 memset(acb, 0, acb_len);
5837
5838 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
5839 if (rval != QLA_SUCCESS) {
5840 rval = -EIO;
5841 goto exit_free_acb;
5842 }
5843
5844 rval = qla4xxx_disable_acb(ha);
5845 if (rval != QLA_SUCCESS) {
5846 rval = -EIO;
5847 goto exit_free_acb;
5848 }
5849
5850 wait_for_completion_timeout(&ha->disable_acb_comp,
5851 DISABLE_ACB_TOV * HZ);
5852
5853 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
5854 if (rval != QLA_SUCCESS) {
5855 rval = -EIO;
5856 goto exit_free_acb;
5857 }
5858
5859exit_free_acb:
5860 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
5861 acb, acb_dma);
5862exit_port_reset:
5863 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
5864 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
5865 return rval;
5866}
5867
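qla4xxx_context_reset() above allocates a coherent DMA buffer for the ACB and releases it on every exit path through the exit_free_acb label. A compact sketch of that allocate/use/free shape, with a hypothetical foo_issue_mbox() standing in for the mailbox calls:

static int foo_read_block(struct device *dev, size_t len)
{
	dma_addr_t dma;
	void *buf;
	int rc;

	buf = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, len);

	rc = foo_issue_mbox(dev, dma, len);	/* hypothetical firmware call */

	dma_free_coherent(dev, len, buf, dma);	/* freed on every path */
	return rc;
}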
5868static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
5869{
5870 struct scsi_qla_host *ha = to_qla_host(shost);
5871 int rval = QLA_SUCCESS;
5872
5873 if (ql4xdontresethba) {
5874 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
5875 __func__));
5876 rval = -EPERM;
5877 goto exit_host_reset;
5878 }
5879
5880 rval = qla4xxx_wait_for_hba_online(ha);
5881 if (rval != QLA_SUCCESS) {
5882 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
5883 "adapter\n", __func__));
5884 rval = -EIO;
5885 goto exit_host_reset;
5886 }
5887
5888 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
5889 goto recover_adapter;
5890
5891 switch (reset_type) {
5892 case SCSI_ADAPTER_RESET:
5893 set_bit(DPC_RESET_HA, &ha->dpc_flags);
5894 break;
5895 case SCSI_FIRMWARE_RESET:
5896 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5897 if (is_qla8022(ha))
5898 /* set firmware context reset */
5899 set_bit(DPC_RESET_HA_FW_CONTEXT,
5900 &ha->dpc_flags);
5901 else {
5902 rval = qla4xxx_context_reset(ha);
5903 goto exit_host_reset;
5904 }
5905 }
5906 break;
5907 }
5908
5909recover_adapter:
5910 rval = qla4xxx_recover_adapter(ha);
5911 if (rval != QLA_SUCCESS) {
5912 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
5913 __func__));
5914 rval = -EIO;
5915 }
5916
5917exit_host_reset:
5918 return rval;
5919}
5920
2232be0d
LC
5921/* PCI AER driver recovers from all correctable errors w/o
5922 * driver intervention. For uncorrectable errors PCI AER
5923 * driver calls the following device driver's callbacks
5924 *
5925 * - Fatal Errors - link_reset
5926 * - Non-Fatal Errors - driver's pci_error_detected() which
5927 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
5928 *
5929 * PCI AER driver calls
5930 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
5931 * returns RECOVERED or NEED_RESET if fw_hung
5932 * NEED_RESET - driver's slot_reset()
5933 * DISCONNECT - device is dead & cannot recover
5934 * RECOVERED - driver's pci_resume()
5935 */
5936static pci_ers_result_t
5937qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5938{
5939 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5940
5941 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
5942 ha->host_no, __func__, state);
5943
5944 if (!is_aer_supported(ha))
5945 return PCI_ERS_RESULT_NONE;
5946
5947 switch (state) {
5948 case pci_channel_io_normal:
5949 clear_bit(AF_EEH_BUSY, &ha->flags);
5950 return PCI_ERS_RESULT_CAN_RECOVER;
5951 case pci_channel_io_frozen:
5952 set_bit(AF_EEH_BUSY, &ha->flags);
5953 qla4xxx_mailbox_premature_completion(ha);
5954 qla4xxx_free_irqs(ha);
5955 pci_disable_device(pdev);
7b3595df
VC
5956 /* Return back all IOs */
5957 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2232be0d
LC
5958 return PCI_ERS_RESULT_NEED_RESET;
5959 case pci_channel_io_perm_failure:
5960 set_bit(AF_EEH_BUSY, &ha->flags);
5961 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
5962 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5963 return PCI_ERS_RESULT_DISCONNECT;
5964 }
5965 return PCI_ERS_RESULT_NEED_RESET;
5966}
5967
5968/**
5969 * qla4xxx_pci_mmio_enabled() gets called if
5970 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
5971 * and read/write to the device still works.
5972 **/
5973static pci_ers_result_t
5974qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
5975{
5976 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5977
5978 if (!is_aer_supported(ha))
5979 return PCI_ERS_RESULT_NONE;
5980
7b3595df 5981 return PCI_ERS_RESULT_RECOVERED;
2232be0d
LC
5982}
5983
7b3595df 5984static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2232be0d
LC
5985{
5986 uint32_t rval = QLA_ERROR;
7b3595df 5987 uint32_t ret = 0;
2232be0d
LC
5988 int fn;
5989 struct pci_dev *other_pdev = NULL;
5990
5991 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
5992
5993 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5994
5995 if (test_bit(AF_ONLINE, &ha->flags)) {
5996 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9
MR
5997 clear_bit(AF_LINK_UP, &ha->flags);
5998 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2232be0d 5999 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2232be0d
LC
6000 }
6001
6002 fn = PCI_FUNC(ha->pdev->devfn);
6003 while (fn > 0) {
6004 fn--;
6005 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
6006 "func %x\n", ha->host_no, __func__, fn);
6007 /* Get the pci device given the domain, bus,
6008 * slot/function number */
6009 other_pdev =
6010 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
6011 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
6012 fn));
6013
6014 if (!other_pdev)
6015 continue;
6016
6017 if (atomic_read(&other_pdev->enable_cnt)) {
6018 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
6019 "func in enabled state%x\n", ha->host_no,
6020 __func__, fn);
6021 pci_dev_put(other_pdev);
6022 break;
6023 }
6024 pci_dev_put(other_pdev);
6025 }
6026
6027 /* The first function on the card, the reset owner will
6028 * start & initialize the firmware. The other functions
6029 * on the card will reset the firmware context
6030 */
6031 if (!fn) {
6032 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
6033 "0x%x is the owner\n", ha->host_no, __func__,
6034 ha->pdev->devfn);
6035
6036 qla4_8xxx_idc_lock(ha);
6037 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6038 QLA82XX_DEV_COLD);
6039
6040 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
6041 QLA82XX_IDC_VERSION);
6042
6043 qla4_8xxx_idc_unlock(ha);
6044 clear_bit(AF_FW_RECOVERY, &ha->flags);
13483730 6045 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
2232be0d
LC
6046 qla4_8xxx_idc_lock(ha);
6047
6048 if (rval != QLA_SUCCESS) {
6049 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6050 "FAILED\n", ha->host_no, __func__);
6051 qla4_8xxx_clear_drv_active(ha);
6052 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6053 QLA82XX_DEV_FAILED);
6054 } else {
6055 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6056 "READY\n", ha->host_no, __func__);
6057 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6058 QLA82XX_DEV_READY);
6059 /* Clear driver state register */
6060 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
6061 qla4_8xxx_set_drv_active(ha);
7b3595df
VC
6062 ret = qla4xxx_request_irqs(ha);
6063 if (ret) {
6064 ql4_printk(KERN_WARNING, ha, "Failed to "
6065 "reserve interrupt %d already in use.\n",
6066 ha->pdev->irq);
6067 rval = QLA_ERROR;
6068 } else {
6069 ha->isp_ops->enable_intrs(ha);
6070 rval = QLA_SUCCESS;
6071 }
2232be0d
LC
6072 }
6073 qla4_8xxx_idc_unlock(ha);
6074 } else {
6075 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
6076 "the reset owner\n", ha->host_no, __func__,
6077 ha->pdev->devfn);
6078 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
6079 QLA82XX_DEV_READY)) {
6080 clear_bit(AF_FW_RECOVERY, &ha->flags);
13483730 6081 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
7b3595df
VC
6082 if (rval == QLA_SUCCESS) {
6083 ret = qla4xxx_request_irqs(ha);
6084 if (ret) {
6085 ql4_printk(KERN_WARNING, ha, "Failed to"
6086 " reserve interrupt %d already in"
6087 " use.\n", ha->pdev->irq);
6088 rval = QLA_ERROR;
6089 } else {
6090 ha->isp_ops->enable_intrs(ha);
6091 rval = QLA_SUCCESS;
6092 }
6093 }
2232be0d
LC
6094 qla4_8xxx_idc_lock(ha);
6095 qla4_8xxx_set_drv_active(ha);
6096 qla4_8xxx_idc_unlock(ha);
6097 }
6098 }
6099 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
6100 return rval;
6101}
6102
6103static pci_ers_result_t
6104qla4xxx_pci_slot_reset(struct pci_dev *pdev)
6105{
6106 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
6107 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
6108 int rc;
6109
6110 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
6111 ha->host_no, __func__);
6112
6113 if (!is_aer_supported(ha))
6114 return PCI_ERS_RESULT_NONE;
6115
6116 /* Restore the saved state of PCIe device -
6117 * BAR registers, PCI Config space, PCIX, MSI,
6118 * IOV states
6119 */
6120 pci_restore_state(pdev);
6121
6122 /* pci_restore_state() clears the device's saved_state flag, so
6123 * save the state again here to set saved_state for later use
6124 */
6125 pci_save_state(pdev);
6126
6127 /* Initialize device or resume if in suspended state */
6128 rc = pci_enable_device(pdev);
6129 if (rc) {
25985edc 6130 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
2232be0d
LC
6131 "device after reset\n", ha->host_no, __func__);
6132 goto exit_slot_reset;
6133 }
6134
7b3595df 6135 ha->isp_ops->disable_intrs(ha);
2232be0d
LC
6136
6137 if (is_qla8022(ha)) {
6138 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
6139 ret = PCI_ERS_RESULT_RECOVERED;
6140 goto exit_slot_reset;
6141 } else
6142 goto exit_slot_reset;
6143 }
6144
6145exit_slot_reset:
6146 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x after "
6147 "slot reset\n", ha->host_no, __func__, ret);
6148 return ret;
6149}
6150
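qla4xxx_pci_slot_reset() above restores PCI config space, immediately re-saves it (pci_restore_state() clears the saved_state flag), and re-enables the device before re-initializing the hardware. The usual slot_reset() preamble, sketched generically rather than as this driver's implementation:

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	pci_restore_state(pdev);	/* bring back BARs/config space */
	pci_save_state(pdev);		/* restore clears saved_state; save again */

	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;

	/* ... re-initialize the hardware and interrupts here ... */
	return PCI_ERS_RESULT_RECOVERED;
}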
6151static void
6152qla4xxx_pci_resume(struct pci_dev *pdev)
6153{
6154 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
6155 int ret;
6156
6157 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
6158 ha->host_no, __func__);
6159
6160 ret = qla4xxx_wait_for_hba_online(ha);
6161 if (ret != QLA_SUCCESS) {
6162 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
6163 "resume I/O from slot/link_reset\n", ha->host_no,
6164 __func__);
6165 }
6166
6167 pci_cleanup_aer_uncorrect_error_status(pdev);
6168 clear_bit(AF_EEH_BUSY, &ha->flags);
6169}
6170
6171static struct pci_error_handlers qla4xxx_err_handler = {
6172 .error_detected = qla4xxx_pci_error_detected,
6173 .mmio_enabled = qla4xxx_pci_mmio_enabled,
6174 .slot_reset = qla4xxx_pci_slot_reset,
6175 .resume = qla4xxx_pci_resume,
6176};
6177
afaf5a2d
DS
6178static struct pci_device_id qla4xxx_pci_tbl[] = {
6179 {
6180 .vendor = PCI_VENDOR_ID_QLOGIC,
6181 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
6182 .subvendor = PCI_ANY_ID,
6183 .subdevice = PCI_ANY_ID,
6184 },
6185 {
6186 .vendor = PCI_VENDOR_ID_QLOGIC,
6187 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
6188 .subvendor = PCI_ANY_ID,
6189 .subdevice = PCI_ANY_ID,
6190 },
d915058f
DS
6191 {
6192 .vendor = PCI_VENDOR_ID_QLOGIC,
6193 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
6194 .subvendor = PCI_ANY_ID,
6195 .subdevice = PCI_ANY_ID,
6196 },
f4f5df23
VC
6197 {
6198 .vendor = PCI_VENDOR_ID_QLOGIC,
6199 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
6200 .subvendor = PCI_ANY_ID,
6201 .subdevice = PCI_ANY_ID,
6202 },
afaf5a2d
DS
6203 {0, 0},
6204};
6205MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
6206
47975477 6207static struct pci_driver qla4xxx_pci_driver = {
afaf5a2d
DS
6208 .name = DRIVER_NAME,
6209 .id_table = qla4xxx_pci_tbl,
6210 .probe = qla4xxx_probe_adapter,
6211 .remove = qla4xxx_remove_adapter,
2232be0d 6212 .err_handler = &qla4xxx_err_handler,
afaf5a2d
DS
6213};
6214
6215static int __init qla4xxx_module_init(void)
6216{
6217 int ret;
6218
6219 /* Allocate cache for SRBs. */
6220 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
20c2df83 6221 SLAB_HWCACHE_ALIGN, NULL);
afaf5a2d
DS
6222 if (srb_cachep == NULL) {
6223 printk(KERN_ERR
6224 "%s: Unable to allocate SRB cache..."
6225 "Failing load!\n", DRIVER_NAME);
6226 ret = -ENOMEM;
6227 goto no_srp_cache;
6228 }
6229
6230 /* Derive version string. */
6231 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
11010fec 6232 if (ql4xextended_error_logging)
afaf5a2d
DS
6233 strcat(qla4xxx_version_str, "-debug");
6234
6235 qla4xxx_scsi_transport =
6236 iscsi_register_transport(&qla4xxx_iscsi_transport);
6237 if (!qla4xxx_scsi_transport){
6238 ret = -ENODEV;
6239 goto release_srb_cache;
6240 }
6241
afaf5a2d
DS
6242 ret = pci_register_driver(&qla4xxx_pci_driver);
6243 if (ret)
6244 goto unregister_transport;
6245
6246 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
6247 return 0;
5ae16db3 6248
afaf5a2d
DS
6249unregister_transport:
6250 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6251release_srb_cache:
6252 kmem_cache_destroy(srb_cachep);
6253no_srp_cache:
6254 return ret;
6255}
6256
6257static void __exit qla4xxx_module_exit(void)
6258{
6259 pci_unregister_driver(&qla4xxx_pci_driver);
6260 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6261 kmem_cache_destroy(srb_cachep);
6262}
6263
6264module_init(qla4xxx_module_init);
6265module_exit(qla4xxx_module_exit);
6266
6267MODULE_AUTHOR("QLogic Corporation");
6268MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
6269MODULE_LICENSE("GPL");
6270MODULE_VERSION(QLA4XXX_DRIVER_VERSION);