Merge tag 'nfs-for-4.10-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
[linux-2.6-block.git] / drivers / scsi / qla2xxx / qla_bsg.c
CommitLineData
cd21c605 1 /*
6e98016c 2 * QLogic Fibre Channel HBA Driver
bd21eaf9 3 * Copyright (c) 2003-2014 QLogic Corporation
6e98016c
GM
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/delay.h>
75cc8cfc 12#include <linux/bsg-lib.h>
6e98016c
GM
13
14/* BSG support for ELS/CT pass through */
9ba56b95
GM
/*
 * Completion callback for a BSG srb.
 *
 * Stores the completion status in the bsg reply, notifies the block
 * layer that the job has finished, then releases the srb through its
 * ->free() hook (qla2x00_bsg_sp_free for bsg srbs).
 *
 * @data: scsi_qla_host the command ran on (opaque callback argument)
 * @ptr:  srb_t carrying the bsg job (opaque callback argument)
 * @res:  completion status placed in bsg_reply->result
 */
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	sp->free(vha, sp);
}
28
/*
 * Release all resources attached to a BSG srb: undo the DMA mappings of
 * the request/reply scatter-gather lists, free the dummy fcport that was
 * allocated for host-based commands, and return the srb to its pool.
 *
 * For SRB_FXIOCB_BCMD the payloads are only mapped when the corresponding
 * flag is set in the vendor IOCB request, so the unmaps are conditional
 * on those same flags; all other srb types map both directions
 * unconditionally.
 */
void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;

	struct qla_hw_data *ha = vha->hw;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		/* The IOCB request rides inside the vendor command words. */
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	/* These srb types carry a dummy fcport allocated just for the job. */
	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(vha, sp);
}
67
/*
 * Validate an FCP priority configuration blob (typically read from flash).
 *
 * @vha:     host the configuration belongs to (used for logging only)
 * @pri_cfg: configuration buffer to validate
 * @flag:    1 = additionally require at least one entry with a valid tag;
 *           any other value = header/signature check only
 *
 * Returns 1 if the configuration is usable, 0 otherwise.
 */
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	/* All-ones means the flash region was never programmed. */
	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	/* The buffer must start with the "HQOS" signature bytes. */
	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
	    bcode[3] != 'S') {
		/* Invalid FCP priority data header*/
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	/* Count the entries that carry a valid priority tag. */
	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}
122
/*
 * BSG vendor-command handler for FCP priority configuration.
 *
 * The sub-command in vendor_cmd[1] selects one of: disable priority
 * handling, enable it, copy the current config blob to the caller, or
 * install a new config blob (validated before it is accepted).
 *
 * Returns 0 on success (the bsg job is completed here), negative errno
 * on failure (the caller is responsible for the job in that case).
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	/* FCP priority is only supported on these ISP families. */
	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				/* Nothing to enable without a config blob. */
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Config buffer is allocated lazily on the first set. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid,
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Mirror the enable bit of the new config into our flag. */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	/* Complete the job only on success; errors are returned to caller. */
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return ret;
}
9ba56b95 251
6e98016c 252static int
75cc8cfc 253qla2x00_process_els(struct bsg_job *bsg_job)
6e98016c 254{
01e0e15c 255 struct fc_bsg_request *bsg_request = bsg_job->request;
6e98016c 256 struct fc_rport *rport;
08f71e09 257 fc_port_t *fcport = NULL;
6e98016c
GM
258 struct Scsi_Host *host;
259 scsi_qla_host_t *vha;
260 struct qla_hw_data *ha;
261 srb_t *sp;
262 const char *type;
263 int req_sg_cnt, rsp_sg_cnt;
264 int rval = (DRIVER_ERROR << 16);
265 uint16_t nextlid = 0;
6e98016c 266
01e0e15c 267 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
1d69b122 268 rport = fc_bsg_to_rport(bsg_job);
08f71e09
HZ
269 fcport = *(fc_port_t **) rport->dd_data;
270 host = rport_to_shost(rport);
271 vha = shost_priv(host);
272 ha = vha->hw;
273 type = "FC_BSG_RPT_ELS";
274 } else {
cd21c605 275 host = fc_bsg_to_shost(bsg_job);
08f71e09
HZ
276 vha = shost_priv(host);
277 ha = vha->hw;
278 type = "FC_BSG_HST_ELS_NOLOGIN";
279 }
280
8c0eb596
BVA
281 if (!vha->flags.online) {
282 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
283 rval = -EIO;
284 goto done;
285 }
286
08f71e09
HZ
287 /* pass through is supported only for ISP 4Gb or higher */
288 if (!IS_FWI2_CAPABLE(ha)) {
7c3df132
SK
289 ql_dbg(ql_dbg_user, vha, 0x7001,
290 "ELS passthru not supported for ISP23xx based adapters.\n");
08f71e09
HZ
291 rval = -EPERM;
292 goto done;
293 }
294
6e98016c
GM
295 /* Multiple SG's are not supported for ELS requests */
296 if (bsg_job->request_payload.sg_cnt > 1 ||
297 bsg_job->reply_payload.sg_cnt > 1) {
7c3df132
SK
298 ql_dbg(ql_dbg_user, vha, 0x7002,
299 "Multiple SG's are not suppored for ELS requests, "
300 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
301 bsg_job->request_payload.sg_cnt,
302 bsg_job->reply_payload.sg_cnt);
6e98016c
GM
303 rval = -EPERM;
304 goto done;
305 }
306
307 /* ELS request for rport */
01e0e15c 308 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
6e98016c
GM
309 /* make sure the rport is logged in,
310 * if not perform fabric login
311 */
312 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
7c3df132
SK
313 ql_dbg(ql_dbg_user, vha, 0x7003,
314 "Failed to login port %06X for ELS passthru.\n",
315 fcport->d_id.b24);
6e98016c
GM
316 rval = -EIO;
317 goto done;
318 }
319 } else {
6e98016c
GM
320 /* Allocate a dummy fcport structure, since functions
321 * preparing the IOCB and mailbox command retrieves port
322 * specific information from fcport structure. For Host based
323 * ELS commands there will be no fcport structure allocated
324 */
325 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
326 if (!fcport) {
327 rval = -ENOMEM;
328 goto done;
329 }
330
331 /* Initialize all required fields of fcport */
332 fcport->vha = vha;
6e98016c 333 fcport->d_id.b.al_pa =
01e0e15c 334 bsg_request->rqst_data.h_els.port_id[0];
6e98016c 335 fcport->d_id.b.area =
01e0e15c 336 bsg_request->rqst_data.h_els.port_id[1];
6e98016c 337 fcport->d_id.b.domain =
01e0e15c 338 bsg_request->rqst_data.h_els.port_id[2];
6e98016c
GM
339 fcport->loop_id =
340 (fcport->d_id.b.al_pa == 0xFD) ?
341 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
342 }
343
6e98016c
GM
344 req_sg_cnt =
345 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
346 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
347 if (!req_sg_cnt) {
348 rval = -ENOMEM;
349 goto done_free_fcport;
350 }
6c452a45
AV
351
352 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
353 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
6e98016c
GM
354 if (!rsp_sg_cnt) {
355 rval = -ENOMEM;
356 goto done_free_fcport;
357 }
358
359 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
6c452a45 360 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
7c3df132
SK
361 ql_log(ql_log_warn, vha, 0x7008,
362 "dma mapping resulted in different sg counts, "
363 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
364 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
365 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
6e98016c
GM
366 rval = -EAGAIN;
367 goto done_unmap_sg;
368 }
369
370 /* Alloc SRB structure */
9ba56b95 371 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
6e98016c
GM
372 if (!sp) {
373 rval = -ENOMEM;
6c452a45 374 goto done_unmap_sg;
6e98016c
GM
375 }
376
9ba56b95 377 sp->type =
01e0e15c
JT
378 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
379 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
9ba56b95 380 sp->name =
01e0e15c
JT
381 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
382 "bsg_els_rpt" : "bsg_els_hst");
9ba56b95
GM
383 sp->u.bsg_job = bsg_job;
384 sp->free = qla2x00_bsg_sp_free;
385 sp->done = qla2x00_bsg_job_done;
6e98016c 386
7c3df132
SK
387 ql_dbg(ql_dbg_user, vha, 0x700a,
388 "bsg rqst type: %s els type: %x - loop-id=%x "
389 "portid=%-2x%02x%02x.\n", type,
01e0e15c 390 bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
7c3df132 391 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
6e98016c
GM
392
393 rval = qla2x00_start_sp(sp);
394 if (rval != QLA_SUCCESS) {
7c3df132
SK
395 ql_log(ql_log_warn, vha, 0x700e,
396 "qla2x00_start_sp failed = %d\n", rval);
b00ee7d7 397 qla2x00_rel_sp(vha, sp);
6e98016c
GM
398 rval = -EIO;
399 goto done_unmap_sg;
400 }
401 return rval;
402
403done_unmap_sg:
404 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
405 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
406 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
407 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
408 goto done_free_fcport;
409
410done_free_fcport:
01e0e15c 411 if (bsg_request->msgcode == FC_BSG_RPT_ELS)
6e98016c
GM
412 kfree(fcport);
413done:
414 return rval;
415}
416
/*
 * Compute how many IOCBs are needed to describe a CT command with the
 * given number of data-segment descriptors: the first IOCB holds two
 * descriptors, and each continuation IOCB holds up to five more.
 */
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t extra_dsds, iocb_cnt;

	iocb_cnt = 1;
	if (dsds > 2) {
		extra_dsds = dsds - 2;
		iocb_cnt += extra_dsds / 5;
		if (extra_dsds % 5)
			iocb_cnt++;
	}
	return iocb_cnt;
}
430
6e98016c 431static int
75cc8cfc 432qla2x00_process_ct(struct bsg_job *bsg_job)
6e98016c
GM
433{
434 srb_t *sp;
01e0e15c 435 struct fc_bsg_request *bsg_request = bsg_job->request;
cd21c605 436 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
6e98016c
GM
437 scsi_qla_host_t *vha = shost_priv(host);
438 struct qla_hw_data *ha = vha->hw;
439 int rval = (DRIVER_ERROR << 16);
440 int req_sg_cnt, rsp_sg_cnt;
441 uint16_t loop_id;
442 struct fc_port *fcport;
443 char *type = "FC_BSG_HST_CT";
6e98016c 444
6e98016c
GM
445 req_sg_cnt =
446 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
447 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
6c452a45 448 if (!req_sg_cnt) {
7c3df132
SK
449 ql_log(ql_log_warn, vha, 0x700f,
450 "dma_map_sg return %d for request\n", req_sg_cnt);
6e98016c
GM
451 rval = -ENOMEM;
452 goto done;
453 }
454
455 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
456 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
457 if (!rsp_sg_cnt) {
7c3df132
SK
458 ql_log(ql_log_warn, vha, 0x7010,
459 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
6e98016c
GM
460 rval = -ENOMEM;
461 goto done;
462 }
463
464 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
6c452a45 465 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
7c3df132
SK
466 ql_log(ql_log_warn, vha, 0x7011,
467 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
468 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
469 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
6e98016c 470 rval = -EAGAIN;
6c452a45 471 goto done_unmap_sg;
6e98016c
GM
472 }
473
474 if (!vha->flags.online) {
7c3df132
SK
475 ql_log(ql_log_warn, vha, 0x7012,
476 "Host is not online.\n");
6e98016c
GM
477 rval = -EIO;
478 goto done_unmap_sg;
479 }
480
481 loop_id =
01e0e15c 482 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
6e98016c
GM
483 >> 24;
484 switch (loop_id) {
6c452a45
AV
485 case 0xFC:
486 loop_id = cpu_to_le16(NPH_SNS);
487 break;
488 case 0xFA:
489 loop_id = vha->mgmt_svr_loop_id;
490 break;
491 default:
7c3df132
SK
492 ql_dbg(ql_dbg_user, vha, 0x7013,
493 "Unknown loop id: %x.\n", loop_id);
6c452a45
AV
494 rval = -EINVAL;
495 goto done_unmap_sg;
6e98016c
GM
496 }
497
498 /* Allocate a dummy fcport structure, since functions preparing the
499 * IOCB and mailbox command retrieves port specific information
500 * from fcport structure. For Host based ELS commands there will be
501 * no fcport structure allocated
502 */
503 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6c452a45 504 if (!fcport) {
7c3df132
SK
505 ql_log(ql_log_warn, vha, 0x7014,
506 "Failed to allocate fcport.\n");
6e98016c 507 rval = -ENOMEM;
6c452a45 508 goto done_unmap_sg;
6e98016c
GM
509 }
510
511 /* Initialize all required fields of fcport */
512 fcport->vha = vha;
01e0e15c
JT
513 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
514 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
515 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
6e98016c
GM
516 fcport->loop_id = loop_id;
517
518 /* Alloc SRB structure */
9ba56b95 519 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
6e98016c 520 if (!sp) {
7c3df132 521 ql_log(ql_log_warn, vha, 0x7015,
9ba56b95 522 "qla2x00_get_sp failed.\n");
6e98016c
GM
523 rval = -ENOMEM;
524 goto done_free_fcport;
525 }
526
9ba56b95
GM
527 sp->type = SRB_CT_CMD;
528 sp->name = "bsg_ct";
529 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
530 sp->u.bsg_job = bsg_job;
531 sp->free = qla2x00_bsg_sp_free;
532 sp->done = qla2x00_bsg_job_done;
6e98016c 533
7c3df132
SK
534 ql_dbg(ql_dbg_user, vha, 0x7016,
535 "bsg rqst type: %s else type: %x - "
536 "loop-id=%x portid=%02x%02x%02x.\n", type,
01e0e15c 537 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
7c3df132
SK
538 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
539 fcport->d_id.b.al_pa);
6e98016c
GM
540
541 rval = qla2x00_start_sp(sp);
542 if (rval != QLA_SUCCESS) {
7c3df132
SK
543 ql_log(ql_log_warn, vha, 0x7017,
544 "qla2x00_start_sp failed=%d.\n", rval);
b00ee7d7 545 qla2x00_rel_sp(vha, sp);
6e98016c
GM
546 rval = -EIO;
547 goto done_free_fcport;
548 }
549 return rval;
550
551done_free_fcport:
552 kfree(fcport);
553done_unmap_sg:
554 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
555 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
556 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
557 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
558done:
559 return rval;
560}
67b2a31f
CD
561
/*
 * Disable loopback mode: if the current port config has internal or
 * external loopback enabled, write back a config with the loopback bits
 * cleared and optionally wait for the DCBX-complete (@wait) and link-up
 * (@wait2) firmware events that follow the reconfiguration.
 *
 * Returns 0 on success, -EINVAL if the port config write or either
 * awaited completion fails/times out. No-op (returns 0) on adapters
 * without port-config support.
 */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0 , sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		/* Clear the loopback bits, keep the rest of the config. */
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;

		/* Tell the ISR paths which completions we care about. */
		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		/* Optionally also wait for the port to come back up. */
		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
629
8fcd6b8b
CD
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 *
 * NOTE(review): new_config is only written for INTERNAL_LOOPBACK /
 * EXTERNAL_LOOPBACK modes; callers appear to pre-zero it and pass only
 * those modes — confirm before adding new callers.
 *
 * Returns 0 on success, -EINVAL on port-config failure, DCBX timeout,
 * or a bad IDC completion status. No-op on adapters without
 * port-config support.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	/* The loop re-arms the wait whenever the firmware asks for more
	 * time via ha->idc_extend_tmo (set from the IDC AEN path).
	 */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
708
/*
 * Handle a loopback/echo diagnostic BSG vendor request.
 *
 * Copies the request payload into a DMA-coherent bounce buffer, then
 * either runs an ECHO test (fabric/FCoE external-loopback case) or a
 * loopback test, setting and later restoring the port configuration on
 * 81xx/8031/8044 adapters. Results and the mailbox status words are
 * returned through the bsg reply. Synchronous: the job is completed
 * here on success.
 *
 * Cleanup is a strict reverse-order goto chain:
 *   rsp buffer -> req buffer -> reply sg unmap -> request sg unmap.
 */
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	/* Bounce buffers: request data in, response data out. */
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
		&req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
		&rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	/* External loopback on a fabric (or FCoE with an ELS-framed
	 * payload) cannot use the loopback mailbox command; run an ECHO
	 * test instead.
	 */
	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
		elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			/* A loopback bit already set means another test owns
			 * the port right now.
			 */
			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			/* Firmware signalled a loopback-induced reset:
			 * abort the ISP (and the MPI on 81xx) to recover.
			 */
			if (response[0] == MBS_COMMAND_ERROR &&
					response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		/* Test failure is reported through the bsg result, not as
		 * a submission error, so rval is cleared here.
		 */
		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, rsp_data,
			rsp_data_len);
	}

	/* Append the mailbox status words plus the command id after the
	 * fc_bsg_reply. NOTE(review): this writes into the request's sense
	 * buffer area via bsg_job->req->sense — presumably where this
	 * kernel's fc_bsg places the reply; verify against the bsg-lib
	 * version in use.
	 */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
	    sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
		req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
949
/*
 * BSG vendor command: reset the ISP84xx chip.
 *
 * vendor_cmd[1] selects the reset flavor; A84_ISSUE_RESET_DIAG_FW asks
 * for a reset into the diagnostic firmware. On success the bsg job is
 * completed here; on failure the (DID_ERROR << 16) value is returned to
 * the caller. -EINVAL on non-84xx adapters.
 */
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
985
986static int
75cc8cfc 987qla84xx_updatefw(struct bsg_job *bsg_job)
6e98016c 988{
01e0e15c
JT
989 struct fc_bsg_request *bsg_request = bsg_job->request;
990 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 991 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
6e98016c
GM
992 scsi_qla_host_t *vha = shost_priv(host);
993 struct qla_hw_data *ha = vha->hw;
994 struct verify_chip_entry_84xx *mn = NULL;
995 dma_addr_t mn_dma, fw_dma;
996 void *fw_buf = NULL;
997 int rval = 0;
998 uint32_t sg_cnt;
999 uint32_t data_len;
1000 uint16_t options;
1001 uint32_t flag;
1002 uint32_t fw_ver;
1003
6e98016c 1004 if (!IS_QLA84XX(ha)) {
7c3df132
SK
1005 ql_dbg(ql_dbg_user, vha, 0x7032,
1006 "Not 84xx, exiting.\n");
6e98016c
GM
1007 return -EINVAL;
1008 }
1009
1010 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1011 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
7c3df132
SK
1012 if (!sg_cnt) {
1013 ql_log(ql_log_warn, vha, 0x7033,
1014 "dma_map_sg returned %d for request.\n", sg_cnt);
6e98016c 1015 return -ENOMEM;
7c3df132 1016 }
6e98016c
GM
1017
1018 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
7c3df132
SK
1019 ql_log(ql_log_warn, vha, 0x7034,
1020 "DMA mapping resulted in different sg counts, "
1021 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1022 bsg_job->request_payload.sg_cnt, sg_cnt);
6e98016c
GM
1023 rval = -EAGAIN;
1024 goto done_unmap_sg;
1025 }
1026
1027 data_len = bsg_job->request_payload.payload_len;
1028 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1029 &fw_dma, GFP_KERNEL);
1030 if (!fw_buf) {
7c3df132
SK
1031 ql_log(ql_log_warn, vha, 0x7035,
1032 "DMA alloc failed for fw_buf.\n");
6e98016c
GM
1033 rval = -ENOMEM;
1034 goto done_unmap_sg;
1035 }
1036
1037 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1038 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1039
1040 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1041 if (!mn) {
7c3df132
SK
1042 ql_log(ql_log_warn, vha, 0x7036,
1043 "DMA alloc failed for fw buffer.\n");
6e98016c
GM
1044 rval = -ENOMEM;
1045 goto done_free_fw_buf;
1046 }
1047
01e0e15c 1048 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
6e98016c
GM
1049 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1050
1051 memset(mn, 0, sizeof(struct access_chip_84xx));
1052 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1053 mn->entry_count = 1;
1054
1055 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1056 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1057 options |= VCO_DIAG_FW;
1058
1059 mn->options = cpu_to_le16(options);
1060 mn->fw_ver = cpu_to_le32(fw_ver);
1061 mn->fw_size = cpu_to_le32(data_len);
1062 mn->fw_seq_size = cpu_to_le32(data_len);
1063 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
1064 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
1065 mn->dseg_length = cpu_to_le32(data_len);
1066 mn->data_seg_cnt = cpu_to_le16(1);
1067
1068 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1069
1070 if (rval) {
7c3df132
SK
1071 ql_log(ql_log_warn, vha, 0x7037,
1072 "Vendor request 84xx updatefw failed.\n");
6e98016c 1073
63ea923a 1074 rval = (DID_ERROR << 16);
6e98016c 1075 } else {
7c3df132
SK
1076 ql_dbg(ql_dbg_user, vha, 0x7038,
1077 "Vendor request 84xx updatefw completed.\n");
6e98016c
GM
1078
1079 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 1080 bsg_reply->result = DID_OK;
6e98016c
GM
1081 }
1082
6e98016c
GM
1083 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1084
1085done_free_fw_buf:
1086 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1087
1088done_unmap_sg:
1089 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1090 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1091
63ea923a 1092 if (!rval)
06548160 1093 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1094 bsg_reply->reply_payload_rcv_len);
6e98016c
GM
1095 return rval;
1096}
1097
1098static int
75cc8cfc 1099qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
6e98016c 1100{
01e0e15c
JT
1101 struct fc_bsg_request *bsg_request = bsg_job->request;
1102 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1103 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
6e98016c
GM
1104 scsi_qla_host_t *vha = shost_priv(host);
1105 struct qla_hw_data *ha = vha->hw;
1106 struct access_chip_84xx *mn = NULL;
1107 dma_addr_t mn_dma, mgmt_dma;
1108 void *mgmt_b = NULL;
1109 int rval = 0;
1110 struct qla_bsg_a84_mgmt *ql84_mgmt;
1111 uint32_t sg_cnt;
d5459083 1112 uint32_t data_len = 0;
6e98016c
GM
1113 uint32_t dma_direction = DMA_NONE;
1114
6e98016c 1115 if (!IS_QLA84XX(ha)) {
7c3df132
SK
1116 ql_log(ql_log_warn, vha, 0x703a,
1117 "Not 84xx, exiting.\n");
6e98016c
GM
1118 return -EINVAL;
1119 }
1120
6e98016c
GM
1121 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1122 if (!mn) {
7c3df132
SK
1123 ql_log(ql_log_warn, vha, 0x703c,
1124 "DMA alloc failed for fw buffer.\n");
6e98016c
GM
1125 return -ENOMEM;
1126 }
1127
1128 memset(mn, 0, sizeof(struct access_chip_84xx));
1129 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1130 mn->entry_count = 1;
01e0e15c 1131 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
6e98016c
GM
1132 switch (ql84_mgmt->mgmt.cmd) {
1133 case QLA84_MGMT_READ_MEM:
1134 case QLA84_MGMT_GET_INFO:
1135 sg_cnt = dma_map_sg(&ha->pdev->dev,
1136 bsg_job->reply_payload.sg_list,
1137 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1138 if (!sg_cnt) {
7c3df132
SK
1139 ql_log(ql_log_warn, vha, 0x703d,
1140 "dma_map_sg returned %d for reply.\n", sg_cnt);
6e98016c
GM
1141 rval = -ENOMEM;
1142 goto exit_mgmt;
1143 }
1144
1145 dma_direction = DMA_FROM_DEVICE;
1146
1147 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
7c3df132
SK
1148 ql_log(ql_log_warn, vha, 0x703e,
1149 "DMA mapping resulted in different sg counts, "
1150 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1151 bsg_job->reply_payload.sg_cnt, sg_cnt);
6e98016c
GM
1152 rval = -EAGAIN;
1153 goto done_unmap_sg;
1154 }
1155
1156 data_len = bsg_job->reply_payload.payload_len;
1157
1158 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1159 &mgmt_dma, GFP_KERNEL);
1160 if (!mgmt_b) {
7c3df132
SK
1161 ql_log(ql_log_warn, vha, 0x703f,
1162 "DMA alloc failed for mgmt_b.\n");
6e98016c
GM
1163 rval = -ENOMEM;
1164 goto done_unmap_sg;
1165 }
1166
1167 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1168 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1169 mn->parameter1 =
1170 cpu_to_le32(
1171 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1172
1173 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1174 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1175 mn->parameter1 =
1176 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1177
1178 mn->parameter2 =
1179 cpu_to_le32(
1180 ql84_mgmt->mgmt.mgmtp.u.info.context);
1181 }
1182 break;
1183
1184 case QLA84_MGMT_WRITE_MEM:
1185 sg_cnt = dma_map_sg(&ha->pdev->dev,
1186 bsg_job->request_payload.sg_list,
1187 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1188
1189 if (!sg_cnt) {
7c3df132
SK
1190 ql_log(ql_log_warn, vha, 0x7040,
1191 "dma_map_sg returned %d.\n", sg_cnt);
6e98016c
GM
1192 rval = -ENOMEM;
1193 goto exit_mgmt;
1194 }
1195
1196 dma_direction = DMA_TO_DEVICE;
1197
1198 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
7c3df132
SK
1199 ql_log(ql_log_warn, vha, 0x7041,
1200 "DMA mapping resulted in different sg counts, "
1201 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1202 bsg_job->request_payload.sg_cnt, sg_cnt);
6e98016c
GM
1203 rval = -EAGAIN;
1204 goto done_unmap_sg;
1205 }
1206
1207 data_len = bsg_job->request_payload.payload_len;
1208 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1209 &mgmt_dma, GFP_KERNEL);
1210 if (!mgmt_b) {
7c3df132
SK
1211 ql_log(ql_log_warn, vha, 0x7042,
1212 "DMA alloc failed for mgmt_b.\n");
6e98016c
GM
1213 rval = -ENOMEM;
1214 goto done_unmap_sg;
1215 }
1216
1217 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1218 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1219
1220 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1221 mn->parameter1 =
1222 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1223 break;
1224
1225 case QLA84_MGMT_CHNG_CONFIG:
1226 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1227 mn->parameter1 =
1228 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1229
1230 mn->parameter2 =
1231 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1232
1233 mn->parameter3 =
1234 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1235 break;
1236
1237 default:
1238 rval = -EIO;
1239 goto exit_mgmt;
1240 }
1241
1242 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1243 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1244 mn->dseg_count = cpu_to_le16(1);
1245 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1246 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1247 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1248 }
1249
1250 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1251
1252 if (rval) {
7c3df132
SK
1253 ql_log(ql_log_warn, vha, 0x7043,
1254 "Vendor request 84xx mgmt failed.\n");
6e98016c 1255
63ea923a 1256 rval = (DID_ERROR << 16);
6e98016c
GM
1257
1258 } else {
7c3df132
SK
1259 ql_dbg(ql_dbg_user, vha, 0x7044,
1260 "Vendor request 84xx mgmt completed.\n");
6e98016c
GM
1261
1262 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 1263 bsg_reply->result = DID_OK;
6e98016c
GM
1264
1265 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1266 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
01e0e15c 1267 bsg_reply->reply_payload_rcv_len =
6e98016c
GM
1268 bsg_job->reply_payload.payload_len;
1269
1270 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
6c452a45
AV
1271 bsg_job->reply_payload.sg_cnt, mgmt_b,
1272 data_len);
6e98016c
GM
1273 }
1274 }
1275
6e98016c 1276done_unmap_sg:
d5459083
HZ
1277 if (mgmt_b)
1278 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1279
6e98016c
GM
1280 if (dma_direction == DMA_TO_DEVICE)
1281 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1282 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1283 else if (dma_direction == DMA_FROM_DEVICE)
1284 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1285 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1286
1287exit_mgmt:
1288 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1289
63ea923a 1290 if (!rval)
06548160 1291 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1292 bsg_reply->reply_payload_rcv_len);
6e98016c
GM
1293 return rval;
1294}
1295
1296static int
75cc8cfc 1297qla24xx_iidma(struct bsg_job *bsg_job)
6e98016c 1298{
01e0e15c
JT
1299 struct fc_bsg_request *bsg_request = bsg_job->request;
1300 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1301 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
6e98016c 1302 scsi_qla_host_t *vha = shost_priv(host);
6e98016c
GM
1303 int rval = 0;
1304 struct qla_port_param *port_param = NULL;
1305 fc_port_t *fcport = NULL;
e8b8b8ad 1306 int found = 0;
6e98016c
GM
1307 uint16_t mb[MAILBOX_REGISTER_COUNT];
1308 uint8_t *rsp_ptr = NULL;
1309
6e98016c 1310 if (!IS_IIDMA_CAPABLE(vha->hw)) {
7c3df132 1311 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
6e98016c
GM
1312 return -EINVAL;
1313 }
1314
01e0e15c 1315 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
6e98016c 1316 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
7c3df132
SK
1317 ql_log(ql_log_warn, vha, 0x7048,
1318 "Invalid destination type.\n");
6e98016c
GM
1319 return -EINVAL;
1320 }
1321
1322 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1323 if (fcport->port_type != FCT_TARGET)
1324 continue;
1325
1326 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1327 fcport->port_name, sizeof(fcport->port_name)))
1328 continue;
e8b8b8ad
JC
1329
1330 found = 1;
6e98016c
GM
1331 break;
1332 }
1333
e8b8b8ad 1334 if (!found) {
7c3df132
SK
1335 ql_log(ql_log_warn, vha, 0x7049,
1336 "Failed to find port.\n");
6e98016c
GM
1337 return -EINVAL;
1338 }
1339
c9afb9a2 1340 if (atomic_read(&fcport->state) != FCS_ONLINE) {
7c3df132
SK
1341 ql_log(ql_log_warn, vha, 0x704a,
1342 "Port is not online.\n");
17cf2c5d
MI
1343 return -EINVAL;
1344 }
1345
9a15eb4b 1346 if (fcport->flags & FCF_LOGIN_NEEDED) {
7c3df132
SK
1347 ql_log(ql_log_warn, vha, 0x704b,
1348 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
9a15eb4b
MI
1349 return -EINVAL;
1350 }
1351
6e98016c
GM
1352 if (port_param->mode)
1353 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1354 port_param->speed, mb);
1355 else
1356 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1357 &port_param->speed, mb);
1358
1359 if (rval) {
7c3df132 1360 ql_log(ql_log_warn, vha, 0x704c,
7b833558
OK
1361 "iIDMA cmd failed for %8phN -- "
1362 "%04x %x %04x %04x.\n", fcport->port_name,
1363 rval, fcport->fp_speed, mb[0], mb[1]);
63ea923a 1364 rval = (DID_ERROR << 16);
6e98016c
GM
1365 } else {
1366 if (!port_param->mode) {
1367 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1368 sizeof(struct qla_port_param);
1369
01e0e15c 1370 rsp_ptr = ((uint8_t *)bsg_reply) +
6e98016c
GM
1371 sizeof(struct fc_bsg_reply);
1372
1373 memcpy(rsp_ptr, port_param,
1374 sizeof(struct qla_port_param));
1375 }
1376
01e0e15c 1377 bsg_reply->result = DID_OK;
06548160 1378 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1379 bsg_reply->reply_payload_rcv_len);
6e98016c
GM
1380 }
1381
6e98016c
GM
1382 return rval;
1383}
1384
f19af163 1385static int
75cc8cfc 1386qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
f19af163
HZ
1387 uint8_t is_update)
1388{
01e0e15c 1389 struct fc_bsg_request *bsg_request = bsg_job->request;
f19af163
HZ
1390 uint32_t start = 0;
1391 int valid = 0;
7c3df132 1392 struct qla_hw_data *ha = vha->hw;
f19af163 1393
f19af163
HZ
1394 if (unlikely(pci_channel_offline(ha->pdev)))
1395 return -EINVAL;
1396
01e0e15c 1397 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
7c3df132
SK
1398 if (start > ha->optrom_size) {
1399 ql_log(ql_log_warn, vha, 0x7055,
1400 "start %d > optrom_size %d.\n", start, ha->optrom_size);
f19af163 1401 return -EINVAL;
7c3df132 1402 }
f19af163 1403
7c3df132
SK
1404 if (ha->optrom_state != QLA_SWAITING) {
1405 ql_log(ql_log_info, vha, 0x7056,
1406 "optrom_state %d.\n", ha->optrom_state);
f19af163 1407 return -EBUSY;
7c3df132 1408 }
f19af163
HZ
1409
1410 ha->optrom_region_start = start;
7c3df132 1411 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
f19af163
HZ
1412 if (is_update) {
1413 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1414 valid = 1;
1415 else if (start == (ha->flt_region_boot * 4) ||
1416 start == (ha->flt_region_fw * 4))
1417 valid = 1;
1418 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
9a6e6400 1419 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
f19af163
HZ
1420 valid = 1;
1421 if (!valid) {
7c3df132
SK
1422 ql_log(ql_log_warn, vha, 0x7058,
1423 "Invalid start region 0x%x/0x%x.\n", start,
1424 bsg_job->request_payload.payload_len);
f19af163
HZ
1425 return -EINVAL;
1426 }
1427
1428 ha->optrom_region_size = start +
1429 bsg_job->request_payload.payload_len > ha->optrom_size ?
1430 ha->optrom_size - start :
1431 bsg_job->request_payload.payload_len;
1432 ha->optrom_state = QLA_SWRITING;
1433 } else {
1434 ha->optrom_region_size = start +
1435 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1436 ha->optrom_size - start :
1437 bsg_job->reply_payload.payload_len;
1438 ha->optrom_state = QLA_SREADING;
1439 }
1440
1441 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1442 if (!ha->optrom_buffer) {
7c3df132 1443 ql_log(ql_log_warn, vha, 0x7059,
f19af163 1444 "Read: Unable to allocate memory for optrom retrieval "
7c3df132 1445 "(%x)\n", ha->optrom_region_size);
f19af163
HZ
1446
1447 ha->optrom_state = QLA_SWAITING;
1448 return -ENOMEM;
1449 }
1450
1451 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1452 return 0;
1453}
1454
1455static int
75cc8cfc 1456qla2x00_read_optrom(struct bsg_job *bsg_job)
f19af163 1457{
01e0e15c 1458 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1459 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
f19af163
HZ
1460 scsi_qla_host_t *vha = shost_priv(host);
1461 struct qla_hw_data *ha = vha->hw;
1462 int rval = 0;
1463
7d613ac6 1464 if (ha->flags.nic_core_reset_hdlr_active)
a49393f2
GM
1465 return -EBUSY;
1466
7a8ab9c8 1467 mutex_lock(&ha->optrom_mutex);
7c3df132 1468 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
7a8ab9c8
CD
1469 if (rval) {
1470 mutex_unlock(&ha->optrom_mutex);
f19af163 1471 return rval;
7a8ab9c8 1472 }
f19af163
HZ
1473
1474 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1475 ha->optrom_region_start, ha->optrom_region_size);
1476
1477 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1478 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1479 ha->optrom_region_size);
1480
01e0e15c
JT
1481 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1482 bsg_reply->result = DID_OK;
f19af163
HZ
1483 vfree(ha->optrom_buffer);
1484 ha->optrom_buffer = NULL;
1485 ha->optrom_state = QLA_SWAITING;
7a8ab9c8 1486 mutex_unlock(&ha->optrom_mutex);
06548160 1487 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1488 bsg_reply->reply_payload_rcv_len);
f19af163
HZ
1489 return rval;
1490}
1491
1492static int
75cc8cfc 1493qla2x00_update_optrom(struct bsg_job *bsg_job)
f19af163 1494{
01e0e15c 1495 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1496 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
f19af163
HZ
1497 scsi_qla_host_t *vha = shost_priv(host);
1498 struct qla_hw_data *ha = vha->hw;
1499 int rval = 0;
1500
7a8ab9c8 1501 mutex_lock(&ha->optrom_mutex);
7c3df132 1502 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
7a8ab9c8
CD
1503 if (rval) {
1504 mutex_unlock(&ha->optrom_mutex);
f19af163 1505 return rval;
7a8ab9c8 1506 }
f19af163 1507
b6d0d9d5
GM
1508 /* Set the isp82xx_no_md_cap not to capture minidump */
1509 ha->flags.isp82xx_no_md_cap = 1;
1510
f19af163
HZ
1511 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1512 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1513 ha->optrom_region_size);
1514
1515 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1516 ha->optrom_region_start, ha->optrom_region_size);
1517
01e0e15c 1518 bsg_reply->result = DID_OK;
f19af163
HZ
1519 vfree(ha->optrom_buffer);
1520 ha->optrom_buffer = NULL;
1521 ha->optrom_state = QLA_SWAITING;
7a8ab9c8 1522 mutex_unlock(&ha->optrom_mutex);
06548160 1523 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1524 bsg_reply->reply_payload_rcv_len);
f19af163
HZ
1525 return rval;
1526}
1527
697a4bc6 1528static int
75cc8cfc 1529qla2x00_update_fru_versions(struct bsg_job *bsg_job)
697a4bc6 1530{
01e0e15c 1531 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1532 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
697a4bc6
JC
1533 scsi_qla_host_t *vha = shost_priv(host);
1534 struct qla_hw_data *ha = vha->hw;
1535 int rval = 0;
1536 uint8_t bsg[DMA_POOL_SIZE];
1537 struct qla_image_version_list *list = (void *)bsg;
1538 struct qla_image_version *image;
1539 uint32_t count;
1540 dma_addr_t sfp_dma;
1541 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1542 if (!sfp) {
01e0e15c 1543 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
697a4bc6
JC
1544 EXT_STATUS_NO_MEMORY;
1545 goto done;
1546 }
1547
1548 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1549 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1550
1551 image = list->version;
1552 count = list->count;
1553 while (count--) {
1554 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1555 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1556 image->field_address.device, image->field_address.offset,
1557 sizeof(image->field_info), image->field_address.option);
1558 if (rval) {
01e0e15c 1559 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
697a4bc6
JC
1560 EXT_STATUS_MAILBOX;
1561 goto dealloc;
1562 }
1563 image++;
1564 }
1565
01e0e15c 1566 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
697a4bc6
JC
1567
1568dealloc:
1569 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1570
1571done:
1572 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 1573 bsg_reply->result = DID_OK << 16;
06548160 1574 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1575 bsg_reply->reply_payload_rcv_len);
697a4bc6
JC
1576
1577 return 0;
1578}
1579
1580static int
75cc8cfc 1581qla2x00_read_fru_status(struct bsg_job *bsg_job)
697a4bc6 1582{
01e0e15c 1583 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1584 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
697a4bc6
JC
1585 scsi_qla_host_t *vha = shost_priv(host);
1586 struct qla_hw_data *ha = vha->hw;
1587 int rval = 0;
1588 uint8_t bsg[DMA_POOL_SIZE];
1589 struct qla_status_reg *sr = (void *)bsg;
1590 dma_addr_t sfp_dma;
1591 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1592 if (!sfp) {
01e0e15c 1593 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
697a4bc6
JC
1594 EXT_STATUS_NO_MEMORY;
1595 goto done;
1596 }
1597
1598 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1599 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1600
1601 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1602 sr->field_address.device, sr->field_address.offset,
1603 sizeof(sr->status_reg), sr->field_address.option);
1604 sr->status_reg = *sfp;
1605
1606 if (rval) {
01e0e15c 1607 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
697a4bc6
JC
1608 EXT_STATUS_MAILBOX;
1609 goto dealloc;
1610 }
1611
1612 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1613 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1614
01e0e15c 1615 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
697a4bc6
JC
1616
1617dealloc:
1618 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1619
1620done:
1621 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c
JT
1622 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1623 bsg_reply->result = DID_OK << 16;
06548160 1624 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1625 bsg_reply->reply_payload_rcv_len);
697a4bc6
JC
1626
1627 return 0;
1628}
1629
1630static int
75cc8cfc 1631qla2x00_write_fru_status(struct bsg_job *bsg_job)
697a4bc6 1632{
01e0e15c 1633 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1634 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
697a4bc6
JC
1635 scsi_qla_host_t *vha = shost_priv(host);
1636 struct qla_hw_data *ha = vha->hw;
1637 int rval = 0;
1638 uint8_t bsg[DMA_POOL_SIZE];
1639 struct qla_status_reg *sr = (void *)bsg;
1640 dma_addr_t sfp_dma;
1641 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1642 if (!sfp) {
01e0e15c 1643 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
697a4bc6
JC
1644 EXT_STATUS_NO_MEMORY;
1645 goto done;
1646 }
1647
1648 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1649 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1650
1651 *sfp = sr->status_reg;
1652 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1653 sr->field_address.device, sr->field_address.offset,
1654 sizeof(sr->status_reg), sr->field_address.option);
1655
1656 if (rval) {
01e0e15c 1657 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
697a4bc6
JC
1658 EXT_STATUS_MAILBOX;
1659 goto dealloc;
1660 }
1661
01e0e15c 1662 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
697a4bc6
JC
1663
1664dealloc:
1665 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1666
1667done:
1668 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 1669 bsg_reply->result = DID_OK << 16;
06548160 1670 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1671 bsg_reply->reply_payload_rcv_len);
697a4bc6
JC
1672
1673 return 0;
1674}
1675
9ebb5d9c 1676static int
75cc8cfc 1677qla2x00_write_i2c(struct bsg_job *bsg_job)
9ebb5d9c 1678{
01e0e15c 1679 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1680 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
9ebb5d9c
JC
1681 scsi_qla_host_t *vha = shost_priv(host);
1682 struct qla_hw_data *ha = vha->hw;
1683 int rval = 0;
1684 uint8_t bsg[DMA_POOL_SIZE];
1685 struct qla_i2c_access *i2c = (void *)bsg;
1686 dma_addr_t sfp_dma;
1687 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1688 if (!sfp) {
01e0e15c 1689 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
9ebb5d9c
JC
1690 EXT_STATUS_NO_MEMORY;
1691 goto done;
1692 }
1693
1694 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1695 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1696
1697 memcpy(sfp, i2c->buffer, i2c->length);
1698 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1699 i2c->device, i2c->offset, i2c->length, i2c->option);
1700
1701 if (rval) {
01e0e15c 1702 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
9ebb5d9c
JC
1703 EXT_STATUS_MAILBOX;
1704 goto dealloc;
1705 }
1706
01e0e15c 1707 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
9ebb5d9c
JC
1708
1709dealloc:
1710 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1711
1712done:
1713 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 1714 bsg_reply->result = DID_OK << 16;
06548160 1715 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1716 bsg_reply->reply_payload_rcv_len);
9ebb5d9c
JC
1717
1718 return 0;
1719}
1720
1721static int
75cc8cfc 1722qla2x00_read_i2c(struct bsg_job *bsg_job)
9ebb5d9c 1723{
01e0e15c 1724 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1725 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
9ebb5d9c
JC
1726 scsi_qla_host_t *vha = shost_priv(host);
1727 struct qla_hw_data *ha = vha->hw;
1728 int rval = 0;
1729 uint8_t bsg[DMA_POOL_SIZE];
1730 struct qla_i2c_access *i2c = (void *)bsg;
1731 dma_addr_t sfp_dma;
1732 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1733 if (!sfp) {
01e0e15c 1734 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
9ebb5d9c
JC
1735 EXT_STATUS_NO_MEMORY;
1736 goto done;
1737 }
1738
1739 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1740 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1741
1742 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1743 i2c->device, i2c->offset, i2c->length, i2c->option);
1744
1745 if (rval) {
01e0e15c 1746 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
9ebb5d9c
JC
1747 EXT_STATUS_MAILBOX;
1748 goto dealloc;
1749 }
1750
1751 memcpy(i2c->buffer, sfp, i2c->length);
1752 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1753 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1754
01e0e15c 1755 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
9ebb5d9c
JC
1756
1757dealloc:
1758 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1759
1760done:
1761 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c
JT
1762 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1763 bsg_reply->result = DID_OK << 16;
06548160 1764 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1765 bsg_reply->reply_payload_rcv_len);
9ebb5d9c
JC
1766
1767 return 0;
1768}
1769
a9b6f722 1770static int
75cc8cfc 1771qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
a9b6f722 1772{
01e0e15c 1773 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 1774 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
a9b6f722
SK
1775 scsi_qla_host_t *vha = shost_priv(host);
1776 struct qla_hw_data *ha = vha->hw;
a9b6f722
SK
1777 uint32_t rval = EXT_STATUS_OK;
1778 uint16_t req_sg_cnt = 0;
1779 uint16_t rsp_sg_cnt = 0;
1780 uint16_t nextlid = 0;
1781 uint32_t tot_dsds;
1782 srb_t *sp = NULL;
1783 uint32_t req_data_len = 0;
1784 uint32_t rsp_data_len = 0;
1785
1786 /* Check the type of the adapter */
1787 if (!IS_BIDI_CAPABLE(ha)) {
1788 ql_log(ql_log_warn, vha, 0x70a0,
1789 "This adapter is not supported\n");
1790 rval = EXT_STATUS_NOT_SUPPORTED;
1791 goto done;
1792 }
1793
1794 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1795 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1796 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1797 rval = EXT_STATUS_BUSY;
1798 goto done;
1799 }
1800
1801 /* Check if host is online */
1802 if (!vha->flags.online) {
1803 ql_log(ql_log_warn, vha, 0x70a1,
1804 "Host is not online\n");
1805 rval = EXT_STATUS_DEVICE_OFFLINE;
1806 goto done;
1807 }
1808
1809 /* Check if cable is plugged in or not */
1810 if (vha->device_flags & DFLG_NO_CABLE) {
1811 ql_log(ql_log_warn, vha, 0x70a2,
1812 "Cable is unplugged...\n");
1813 rval = EXT_STATUS_INVALID_CFG;
1814 goto done;
1815 }
1816
1817 /* Check if the switch is connected or not */
1818 if (ha->current_topology != ISP_CFG_F) {
1819 ql_log(ql_log_warn, vha, 0x70a3,
1820 "Host is not connected to the switch\n");
1821 rval = EXT_STATUS_INVALID_CFG;
1822 goto done;
1823 }
1824
1825 /* Check if operating mode is P2P */
1826 if (ha->operating_mode != P2P) {
1827 ql_log(ql_log_warn, vha, 0x70a4,
1828 "Host is operating mode is not P2p\n");
1829 rval = EXT_STATUS_INVALID_CFG;
1830 goto done;
1831 }
1832
a9b6f722
SK
1833 mutex_lock(&ha->selflogin_lock);
1834 if (vha->self_login_loop_id == 0) {
1835 /* Initialize all required fields of fcport */
1836 vha->bidir_fcport.vha = vha;
1837 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1838 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1839 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1840 vha->bidir_fcport.loop_id = vha->loop_id;
1841
1842 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1843 ql_log(ql_log_warn, vha, 0x70a7,
1844 "Failed to login port %06X for bidirectional IOCB\n",
1845 vha->bidir_fcport.d_id.b24);
1846 mutex_unlock(&ha->selflogin_lock);
1847 rval = EXT_STATUS_MAILBOX;
1848 goto done;
1849 }
1850 vha->self_login_loop_id = nextlid - 1;
1851
1852 }
1853 /* Assign the self login loop id to fcport */
1854 mutex_unlock(&ha->selflogin_lock);
1855
1856 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1857
1858 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1859 bsg_job->request_payload.sg_list,
1860 bsg_job->request_payload.sg_cnt,
1861 DMA_TO_DEVICE);
1862
1863 if (!req_sg_cnt) {
1864 rval = EXT_STATUS_NO_MEMORY;
1865 goto done;
1866 }
1867
1868 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1869 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1870 DMA_FROM_DEVICE);
1871
1872 if (!rsp_sg_cnt) {
1873 rval = EXT_STATUS_NO_MEMORY;
1874 goto done_unmap_req_sg;
1875 }
1876
1877 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1878 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1879 ql_dbg(ql_dbg_user, vha, 0x70a9,
1880 "Dma mapping resulted in different sg counts "
1881 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1882 "%x dma_reply_sg_cnt: %x]\n",
1883 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1884 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1885 rval = EXT_STATUS_NO_MEMORY;
1886 goto done_unmap_sg;
1887 }
1888
1889 if (req_data_len != rsp_data_len) {
1890 rval = EXT_STATUS_BUSY;
1891 ql_log(ql_log_warn, vha, 0x70aa,
1892 "req_data_len != rsp_data_len\n");
1893 goto done_unmap_sg;
1894 }
1895
1896 req_data_len = bsg_job->request_payload.payload_len;
1897 rsp_data_len = bsg_job->reply_payload.payload_len;
1898
1899
1900 /* Alloc SRB structure */
1901 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1902 if (!sp) {
1903 ql_dbg(ql_dbg_user, vha, 0x70ac,
1904 "Alloc SRB structure failed\n");
1905 rval = EXT_STATUS_NO_MEMORY;
1906 goto done_unmap_sg;
1907 }
1908
1909 /*Populate srb->ctx with bidir ctx*/
1910 sp->u.bsg_job = bsg_job;
1911 sp->free = qla2x00_bsg_sp_free;
1912 sp->type = SRB_BIDI_CMD;
1913 sp->done = qla2x00_bsg_job_done;
1914
1915 /* Add the read and write sg count */
1916 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1917
1918 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1919 if (rval != EXT_STATUS_OK)
1920 goto done_free_srb;
1921 /* the bsg request will be completed in the interrupt handler */
1922 return rval;
1923
1924done_free_srb:
1925 mempool_free(sp, ha->srb_mempool);
1926done_unmap_sg:
1927 dma_unmap_sg(&ha->pdev->dev,
1928 bsg_job->reply_payload.sg_list,
1929 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1930done_unmap_req_sg:
1931 dma_unmap_sg(&ha->pdev->dev,
1932 bsg_job->request_payload.sg_list,
1933 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1934done:
1935
1936 /* Return an error vendor specific response
1937 * and complete the bsg request
1938 */
01e0e15c 1939 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
a9b6f722 1940 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c
JT
1941 bsg_reply->reply_payload_rcv_len = 0;
1942 bsg_reply->result = (DID_OK) << 16;
06548160 1943 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 1944 bsg_reply->reply_payload_rcv_len);
9e03aa2f 1945 /* Always return success, vendor rsp carries correct status */
a9b6f722
SK
1946 return 0;
1947}
1948
8ae6d9c7 1949static int
75cc8cfc 1950qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
8ae6d9c7 1951{
01e0e15c 1952 struct fc_bsg_request *bsg_request = bsg_job->request;
cd21c605 1953 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
8ae6d9c7
GM
1954 scsi_qla_host_t *vha = shost_priv(host);
1955 struct qla_hw_data *ha = vha->hw;
1956 int rval = (DRIVER_ERROR << 16);
1957 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1958 srb_t *sp;
1959 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1960 struct fc_port *fcport;
1961 char *type = "FC_BSG_HST_FX_MGMT";
1962
1963 /* Copy the IOCB specific information */
1964 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
01e0e15c 1965 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
8ae6d9c7
GM
1966
1967 /* Dump the vendor information */
1968 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
1969 (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
1970
1971 if (!vha->flags.online) {
1972 ql_log(ql_log_warn, vha, 0x70d0,
1973 "Host is not online.\n");
1974 rval = -EIO;
1975 goto done;
1976 }
1977
1978 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1979 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1980 bsg_job->request_payload.sg_list,
1981 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1982 if (!req_sg_cnt) {
1983 ql_log(ql_log_warn, vha, 0x70c7,
1984 "dma_map_sg return %d for request\n", req_sg_cnt);
1985 rval = -ENOMEM;
1986 goto done;
1987 }
1988 }
1989
1990 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
1991 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1992 bsg_job->reply_payload.sg_list,
1993 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1994 if (!rsp_sg_cnt) {
1995 ql_log(ql_log_warn, vha, 0x70c8,
1996 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
1997 rval = -ENOMEM;
1998 goto done_unmap_req_sg;
1999 }
2000 }
2001
2002 ql_dbg(ql_dbg_user, vha, 0x70c9,
2003 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2004 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2005 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2006
2007 /* Allocate a dummy fcport structure, since functions preparing the
2008 * IOCB and mailbox command retrieves port specific information
2009 * from fcport structure. For Host based ELS commands there will be
2010 * no fcport structure allocated
2011 */
2012 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2013 if (!fcport) {
2014 ql_log(ql_log_warn, vha, 0x70ca,
2015 "Failed to allocate fcport.\n");
2016 rval = -ENOMEM;
2017 goto done_unmap_rsp_sg;
2018 }
2019
2020 /* Alloc SRB structure */
2021 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2022 if (!sp) {
2023 ql_log(ql_log_warn, vha, 0x70cb,
2024 "qla2x00_get_sp failed.\n");
2025 rval = -ENOMEM;
2026 goto done_free_fcport;
2027 }
2028
2029 /* Initialize all required fields of fcport */
2030 fcport->vha = vha;
2031 fcport->loop_id = piocb_rqst->dataword;
2032
2033 sp->type = SRB_FXIOCB_BCMD;
2034 sp->name = "bsg_fx_mgmt";
2035 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2036 sp->u.bsg_job = bsg_job;
2037 sp->free = qla2x00_bsg_sp_free;
2038 sp->done = qla2x00_bsg_job_done;
2039
2040 ql_dbg(ql_dbg_user, vha, 0x70cc,
2041 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2042 type, piocb_rqst->func_type, fcport->loop_id);
2043
2044 rval = qla2x00_start_sp(sp);
2045 if (rval != QLA_SUCCESS) {
2046 ql_log(ql_log_warn, vha, 0x70cd,
2047 "qla2x00_start_sp failed=%d.\n", rval);
2048 mempool_free(sp, ha->srb_mempool);
2049 rval = -EIO;
2050 goto done_free_fcport;
2051 }
2052 return rval;
2053
2054done_free_fcport:
2055 kfree(fcport);
2056
2057done_unmap_rsp_sg:
2058 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2059 dma_unmap_sg(&ha->pdev->dev,
2060 bsg_job->reply_payload.sg_list,
2061 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2062done_unmap_req_sg:
2063 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2064 dma_unmap_sg(&ha->pdev->dev,
2065 bsg_job->request_payload.sg_list,
2066 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2067
2068done:
2069 return rval;
2070}
2071
db64e930 2072static int
75cc8cfc 2073qla26xx_serdes_op(struct bsg_job *bsg_job)
db64e930 2074{
01e0e15c 2075 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 2076 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
db64e930
JC
2077 scsi_qla_host_t *vha = shost_priv(host);
2078 int rval = 0;
2079 struct qla_serdes_reg sr;
2080
2081 memset(&sr, 0, sizeof(sr));
2082
2083 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2084 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2085
2086 switch (sr.cmd) {
2087 case INT_SC_SERDES_WRITE_REG:
2088 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
01e0e15c 2089 bsg_reply->reply_payload_rcv_len = 0;
db64e930
JC
2090 break;
2091 case INT_SC_SERDES_READ_REG:
2092 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2093 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2094 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
01e0e15c 2095 bsg_reply->reply_payload_rcv_len = sizeof(sr);
db64e930
JC
2096 break;
2097 default:
e8887c51 2098 ql_dbg(ql_dbg_user, vha, 0x708c,
db64e930 2099 "Unknown serdes cmd %x.\n", sr.cmd);
e8887c51
JC
2100 rval = -EINVAL;
2101 break;
2102 }
2103
01e0e15c 2104 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
e8887c51
JC
2105 rval ? EXT_STATUS_MAILBOX : 0;
2106
2107 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 2108 bsg_reply->result = DID_OK << 16;
06548160 2109 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 2110 bsg_reply->reply_payload_rcv_len);
e8887c51
JC
2111 return 0;
2112}
2113
2114static int
75cc8cfc 2115qla8044_serdes_op(struct bsg_job *bsg_job)
e8887c51 2116{
01e0e15c 2117 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 2118 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
e8887c51
JC
2119 scsi_qla_host_t *vha = shost_priv(host);
2120 int rval = 0;
2121 struct qla_serdes_reg_ex sr;
2122
2123 memset(&sr, 0, sizeof(sr));
2124
2125 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2126 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2127
2128 switch (sr.cmd) {
2129 case INT_SC_SERDES_WRITE_REG:
2130 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
01e0e15c 2131 bsg_reply->reply_payload_rcv_len = 0;
e8887c51
JC
2132 break;
2133 case INT_SC_SERDES_READ_REG:
2134 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2135 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2136 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
01e0e15c 2137 bsg_reply->reply_payload_rcv_len = sizeof(sr);
e8887c51
JC
2138 break;
2139 default:
2140 ql_dbg(ql_dbg_user, vha, 0x70cf,
2141 "Unknown serdes cmd %x.\n", sr.cmd);
2142 rval = -EINVAL;
db64e930
JC
2143 break;
2144 }
2145
01e0e15c 2146 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
db64e930
JC
2147 rval ? EXT_STATUS_MAILBOX : 0;
2148
2149 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 2150 bsg_reply->result = DID_OK << 16;
06548160 2151 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 2152 bsg_reply->reply_payload_rcv_len);
db64e930
JC
2153 return 0;
2154}
2155
4243c115 2156static int
75cc8cfc 2157qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
4243c115 2158{
01e0e15c 2159 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 2160 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
4243c115
SC
2161 scsi_qla_host_t *vha = shost_priv(host);
2162 struct qla_hw_data *ha = vha->hw;
2163 struct qla_flash_update_caps cap;
2164
2165 if (!(IS_QLA27XX(ha)))
2166 return -EPERM;
2167
2168 memset(&cap, 0, sizeof(cap));
2169 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2170 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2171 (uint64_t)ha->fw_attributes_h << 16 |
2172 (uint64_t)ha->fw_attributes;
2173
2174 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2175 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
01e0e15c 2176 bsg_reply->reply_payload_rcv_len = sizeof(cap);
4243c115 2177
01e0e15c 2178 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
4243c115
SC
2179 EXT_STATUS_OK;
2180
2181 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 2182 bsg_reply->result = DID_OK << 16;
06548160 2183 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 2184 bsg_reply->reply_payload_rcv_len);
4243c115
SC
2185 return 0;
2186}
2187
2188static int
75cc8cfc 2189qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
4243c115 2190{
01e0e15c 2191 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 2192 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
4243c115
SC
2193 scsi_qla_host_t *vha = shost_priv(host);
2194 struct qla_hw_data *ha = vha->hw;
2195 uint64_t online_fw_attr = 0;
2196 struct qla_flash_update_caps cap;
2197
2198 if (!(IS_QLA27XX(ha)))
2199 return -EPERM;
2200
2201 memset(&cap, 0, sizeof(cap));
2202 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2203 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2204
2205 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2206 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2207 (uint64_t)ha->fw_attributes_h << 16 |
2208 (uint64_t)ha->fw_attributes;
2209
2210 if (online_fw_attr != cap.capabilities) {
01e0e15c 2211 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
4243c115
SC
2212 EXT_STATUS_INVALID_PARAM;
2213 return -EINVAL;
2214 }
2215
2216 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
01e0e15c 2217 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
4243c115
SC
2218 EXT_STATUS_INVALID_PARAM;
2219 return -EINVAL;
2220 }
2221
01e0e15c 2222 bsg_reply->reply_payload_rcv_len = 0;
4243c115 2223
01e0e15c 2224 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
4243c115
SC
2225 EXT_STATUS_OK;
2226
2227 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 2228 bsg_reply->result = DID_OK << 16;
06548160 2229 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 2230 bsg_reply->reply_payload_rcv_len);
4243c115
SC
2231 return 0;
2232}
2233
969a6199 2234static int
75cc8cfc 2235qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
969a6199 2236{
01e0e15c 2237 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 2238 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
969a6199
SC
2239 scsi_qla_host_t *vha = shost_priv(host);
2240 struct qla_hw_data *ha = vha->hw;
2241 struct qla_bbcr_data bbcr;
2242 uint16_t loop_id, topo, sw_cap;
2243 uint8_t domain, area, al_pa, state;
2244 int rval;
2245
2246 if (!(IS_QLA27XX(ha)))
2247 return -EPERM;
2248
2249 memset(&bbcr, 0, sizeof(bbcr));
2250
2251 if (vha->flags.bbcr_enable)
2252 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2253 else
2254 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2255
2256 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2257 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2258 &area, &domain, &topo, &sw_cap);
c73191b8
HZ
2259 if (rval != QLA_SUCCESS) {
2260 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2261 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2262 bbcr.mbx1 = loop_id;
2263 goto done;
2264 }
969a6199
SC
2265
2266 state = (vha->bbcr >> 12) & 0x1;
2267
2268 if (state) {
2269 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2270 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2271 } else {
2272 bbcr.state = QLA_BBCR_STATE_ONLINE;
2273 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2274 }
2275
2276 bbcr.configured_bbscn = vha->bbcr & 0xf;
2277 }
2278
c73191b8 2279done:
969a6199
SC
2280 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2281 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
01e0e15c 2282 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
969a6199 2283
01e0e15c 2284 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
969a6199
SC
2285
2286 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
01e0e15c 2287 bsg_reply->result = DID_OK << 16;
06548160 2288 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 2289 bsg_reply->reply_payload_rcv_len);
969a6199
SC
2290 return 0;
2291}
2292
243de676 2293static int
75cc8cfc 2294qla2x00_get_priv_stats(struct bsg_job *bsg_job)
243de676 2295{
01e0e15c
JT
2296 struct fc_bsg_request *bsg_request = bsg_job->request;
2297 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 2298 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
243de676
HZ
2299 scsi_qla_host_t *vha = shost_priv(host);
2300 struct qla_hw_data *ha = vha->hw;
2301 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2302 struct link_statistics *stats = NULL;
2303 dma_addr_t stats_dma;
8437dda0 2304 int rval;
01e0e15c 2305 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
8437dda0 2306 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
243de676
HZ
2307
2308 if (test_bit(UNLOADING, &vha->dpc_flags))
8437dda0 2309 return -ENODEV;
243de676
HZ
2310
2311 if (unlikely(pci_channel_offline(ha->pdev)))
8437dda0 2312 return -ENODEV;
243de676
HZ
2313
2314 if (qla2x00_reset_active(vha))
8437dda0 2315 return -EBUSY;
243de676
HZ
2316
2317 if (!IS_FWI2_CAPABLE(ha))
8437dda0 2318 return -EPERM;
243de676
HZ
2319
2320 stats = dma_alloc_coherent(&ha->pdev->dev,
8437dda0 2321 sizeof(*stats), &stats_dma, GFP_KERNEL);
243de676
HZ
2322 if (!stats) {
2323 ql_log(ql_log_warn, vha, 0x70e2,
8437dda0
SC
2324 "Failed to allocate memory for stats.\n");
2325 return -ENOMEM;
243de676
HZ
2326 }
2327
8437dda0 2328 memset(stats, 0, sizeof(*stats));
243de676 2329
8437dda0 2330 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
243de676 2331
8437dda0
SC
2332 if (rval == QLA_SUCCESS) {
2333 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
2334 (uint8_t *)stats, sizeof(*stats));
2335 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2336 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2337 }
243de676 2338
01e0e15c
JT
2339 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2340 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
8437dda0 2341 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
243de676 2342
01e0e15c
JT
2343 bsg_job->reply_len = sizeof(*bsg_reply);
2344 bsg_reply->result = DID_OK << 16;
06548160 2345 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 2346 bsg_reply->reply_payload_rcv_len);
243de676 2347
8437dda0 2348 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
243de676 2349 stats, stats_dma);
8437dda0
SC
2350
2351 return 0;
243de676
HZ
2352}
2353
ec891462 2354static int
75cc8cfc 2355qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
ec891462 2356{
01e0e15c 2357 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
cd21c605 2358 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
ec891462
JC
2359 scsi_qla_host_t *vha = shost_priv(host);
2360 int rval;
2361 struct qla_dport_diag *dd;
2362
2363 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
2364 return -EPERM;
2365
2366 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2367 if (!dd) {
2368 ql_log(ql_log_warn, vha, 0x70db,
2369 "Failed to allocate memory for dport.\n");
2370 return -ENOMEM;
2371 }
2372
2373 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2374 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2375
2376 rval = qla26xx_dport_diagnostics(
2377 vha, dd->buf, sizeof(dd->buf), dd->options);
2378 if (rval == QLA_SUCCESS) {
2379 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2380 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2381 }
2382
01e0e15c
JT
2383 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2384 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
ec891462
JC
2385 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2386
01e0e15c
JT
2387 bsg_job->reply_len = sizeof(*bsg_reply);
2388 bsg_reply->result = DID_OK << 16;
06548160 2389 bsg_job_done(bsg_job, bsg_reply->result,
1abaede7 2390 bsg_reply->reply_payload_rcv_len);
ec891462
JC
2391
2392 kfree(dd);
2393
2394 return 0;
2395}
2396
6e98016c 2397static int
75cc8cfc 2398qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
6e98016c 2399{
01e0e15c
JT
2400 struct fc_bsg_request *bsg_request = bsg_job->request;
2401
2402 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
6e98016c
GM
2403 case QL_VND_LOOPBACK:
2404 return qla2x00_process_loopback(bsg_job);
2405
2406 case QL_VND_A84_RESET:
2407 return qla84xx_reset(bsg_job);
2408
2409 case QL_VND_A84_UPDATE_FW:
2410 return qla84xx_updatefw(bsg_job);
2411
2412 case QL_VND_A84_MGMT_CMD:
2413 return qla84xx_mgmt_cmd(bsg_job);
2414
2415 case QL_VND_IIDMA:
2416 return qla24xx_iidma(bsg_job);
2417
09ff701a
SR
2418 case QL_VND_FCP_PRIO_CFG_CMD:
2419 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2420
f19af163
HZ
2421 case QL_VND_READ_FLASH:
2422 return qla2x00_read_optrom(bsg_job);
2423
2424 case QL_VND_UPDATE_FLASH:
2425 return qla2x00_update_optrom(bsg_job);
2426
697a4bc6
JC
2427 case QL_VND_SET_FRU_VERSION:
2428 return qla2x00_update_fru_versions(bsg_job);
2429
2430 case QL_VND_READ_FRU_STATUS:
2431 return qla2x00_read_fru_status(bsg_job);
2432
2433 case QL_VND_WRITE_FRU_STATUS:
2434 return qla2x00_write_fru_status(bsg_job);
2435
9ebb5d9c
JC
2436 case QL_VND_WRITE_I2C:
2437 return qla2x00_write_i2c(bsg_job);
2438
2439 case QL_VND_READ_I2C:
2440 return qla2x00_read_i2c(bsg_job);
2441
a9b6f722
SK
2442 case QL_VND_DIAG_IO_CMD:
2443 return qla24xx_process_bidir_cmd(bsg_job);
2444
8ae6d9c7
GM
2445 case QL_VND_FX00_MGMT_CMD:
2446 return qlafx00_mgmt_cmd(bsg_job);
db64e930
JC
2447
2448 case QL_VND_SERDES_OP:
2449 return qla26xx_serdes_op(bsg_job);
2450
e8887c51
JC
2451 case QL_VND_SERDES_OP_EX:
2452 return qla8044_serdes_op(bsg_job);
2453
4243c115
SC
2454 case QL_VND_GET_FLASH_UPDATE_CAPS:
2455 return qla27xx_get_flash_upd_cap(bsg_job);
2456
2457 case QL_VND_SET_FLASH_UPDATE_CAPS:
2458 return qla27xx_set_flash_upd_cap(bsg_job);
2459
969a6199
SC
2460 case QL_VND_GET_BBCR_DATA:
2461 return qla27xx_get_bbcr_data(bsg_job);
2462
243de676 2463 case QL_VND_GET_PRIV_STATS:
8437dda0 2464 case QL_VND_GET_PRIV_STATS_EX:
243de676
HZ
2465 return qla2x00_get_priv_stats(bsg_job);
2466
ec891462
JC
2467 case QL_VND_DPORT_DIAGNOSTICS:
2468 return qla2x00_do_dport_diagnostics(bsg_job);
2469
6e98016c 2470 default:
6e98016c
GM
2471 return -ENOSYS;
2472 }
2473}
2474
2475int
75cc8cfc 2476qla24xx_bsg_request(struct bsg_job *bsg_job)
6e98016c 2477{
01e0e15c
JT
2478 struct fc_bsg_request *bsg_request = bsg_job->request;
2479 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
6e98016c 2480 int ret = -EINVAL;
7c3df132 2481 struct fc_rport *rport;
7c3df132
SK
2482 struct Scsi_Host *host;
2483 scsi_qla_host_t *vha;
2484
b7bfbe12 2485 /* In case no data transferred. */
01e0e15c 2486 bsg_reply->reply_payload_rcv_len = 0;
b7bfbe12 2487
01e0e15c 2488 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
1d69b122 2489 rport = fc_bsg_to_rport(bsg_job);
7c3df132
SK
2490 host = rport_to_shost(rport);
2491 vha = shost_priv(host);
2492 } else {
cd21c605 2493 host = fc_bsg_to_shost(bsg_job);
7c3df132
SK
2494 vha = shost_priv(host);
2495 }
2496
d051a5aa
AV
2497 if (qla2x00_reset_active(vha)) {
2498 ql_dbg(ql_dbg_user, vha, 0x709f,
2499 "BSG: ISP abort active/needed -- cmd=%d.\n",
01e0e15c 2500 bsg_request->msgcode);
d051a5aa
AV
2501 return -EBUSY;
2502 }
2503
7c3df132 2504 ql_dbg(ql_dbg_user, vha, 0x7000,
01e0e15c 2505 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
6e98016c 2506
01e0e15c 2507 switch (bsg_request->msgcode) {
6e98016c
GM
2508 case FC_BSG_RPT_ELS:
2509 case FC_BSG_HST_ELS_NOLOGIN:
2510 ret = qla2x00_process_els(bsg_job);
2511 break;
2512 case FC_BSG_HST_CT:
2513 ret = qla2x00_process_ct(bsg_job);
2514 break;
2515 case FC_BSG_HST_VENDOR:
2516 ret = qla2x00_process_vendor_specific(bsg_job);
2517 break;
2518 case FC_BSG_HST_ADD_RPORT:
2519 case FC_BSG_HST_DEL_RPORT:
2520 case FC_BSG_RPT_CT:
2521 default:
7c3df132 2522 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
6e98016c 2523 break;
6c452a45 2524 }
6e98016c
GM
2525 return ret;
2526}
2527
/*
 * qla24xx_bsg_timeout() - bsg layer timeout handler.
 *
 * Scan every request queue's outstanding-command slots for the SRB
 * that owns this bsg job (CT, host ELS, or Fx00 IOCB types) and try
 * to abort it.  If no SRB is found the job is flagged -ENXIO.
 * Always returns 0 (the bsg layer ignores the value).
 *
 * Locking: the scan runs under hardware_lock.  The lock is dropped
 * around the abort mailbox call (it can sleep) and re-taken before
 * exiting through 'done' so the unlock there is balanced.  The slot is
 * cleared before dropping the lock so the ISR cannot complete the SRB
 * underneath the abort; this function then frees the SRB itself.
 */
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* slot 0 is never used for outstanding commands */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				/* only bsg-backed SRB types can match */
				if (((sp->type == SRB_CT_CMD) ||
					(sp->type == SRB_ELS_CMD_HST) ||
					(sp->type == SRB_FXIOCB_BCMD))
					&& (sp->u.bsg_job == bsg_job)) {
					/* claim the slot, then drop the lock:
					 * abort_command issues a mailbox cmd */
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						bsg_job->req->errors =
						bsg_reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						bsg_job->req->errors =
						bsg_reply->result = 0;
					}
					/* re-take so 'done' unlock balances */
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* slot was cleared above, so the SRB is ours to free */
	sp->free(vha, sp);
	return 0;
}