2 * bnx2fc_els.c: QLogic NetXtreme II Linux FCoE offload driver.
3 * This file contains helper routines that handle ELS requests
6 * Copyright (c) 2008 - 2013 Broadcom Corporation
7 * Copyright (c) 2014, QLogic Corporation
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
13 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
18 static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
20 static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
22 static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
23 void *data, u32 data_len,
24 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
25 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
/*
 * bnx2fc_rrq_compl() - completion callback for a previously issued RRQ ELS.
 * @cb_arg: carries the RRQ command itself (io_req) and the original
 *          aborted IO request (aborted_io_req) whose exchange the RRQ
 *          was reclaiming.
 *
 * Drops the reference held on the original IO request.  If the RRQ ELS
 * timed out, the request is removed from the active command queue and a
 * firmware cleanup is initiated so the late completion is dropped.
 *
 * NOTE(review): this view of the file is elided (braces and some local
 * declarations are missing); comments describe only the visible logic.
 */
27 static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
29 struct bnx2fc_cmd *orig_io_req;
30 struct bnx2fc_cmd *rrq_req;
34 rrq_req = cb_arg->io_req;
35 orig_io_req = cb_arg->aborted_io_req;
37 BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
38 orig_io_req->xid, rrq_req->xid);
/* RRQ processing is done with the original exchange: drop the hold
 * taken when the RRQ was sent (see bnx2fc_send_rrq). */
40 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
/* Timed-out ELS: dequeue and let FW cleanup swallow the completion. */
42 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
44 * els req is timed out. cleanup the IO with FW and
45 * drop the completion. Remove from active_cmd_queue.
47 BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
50 if (rrq_req->on_active_queue) {
51 list_del_init(&rrq_req->link);
52 rrq_req->on_active_queue = 0;
53 rc = bnx2fc_initiate_cleanup(rrq_req);
/*
 * bnx2fc_send_rrq() - send a Reinstate Recovery Qualifier ELS for an
 * aborted IO request so its OX_ID/RX_ID pair may be reused.
 * @aborted_io_req: the IO whose exchange is being reclaimed; a reference
 *                  on it is transferred to the RRQ completion path.
 *
 * On failure to issue the ELS, the reference on @aborted_io_req is
 * dropped here under tgt_lock instead.
 *
 * NOTE(review): elided view — the retry loop around
 * bnx2fc_initiate_els() implied by the 10*HZ time_after() check is not
 * fully visible; confirm against the full source.
 */
59 int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
62 struct fc_els_rrq rrq;
63 struct bnx2fc_rport *tgt = aborted_io_req->tgt;
64 struct fc_lport *lport = tgt->rdata->local_port;
65 struct bnx2fc_els_cb_arg *cb_arg = NULL;
67 u32 r_a_tov = lport->r_a_tov;
68 unsigned long start = jiffies;
71 BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
73 memset(&rrq, 0, sizeof(rrq));
/* GFP_NOIO: may be called on an IO-completion/error-recovery path. */
75 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
77 printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
82 cb_arg->aborted_io_req = aborted_io_req;
/* Identify the exchange being reclaimed by its original OX_ID/RX_ID. */
84 rrq.rrq_cmd = ELS_RRQ;
85 hton24(rrq.rrq_s_id, sid);
86 rrq.rrq_ox_id = htons(aborted_io_req->xid);
87 rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
90 rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
91 bnx2fc_rrq_compl, cb_arg,
/* Give up retrying after ~10 seconds from the first attempt. */
94 if (time_after(jiffies, start + (10 * HZ))) {
95 BNX2FC_ELS_DBG("rrq Failed\n");
/* ELS could not be issued: drop the reference the completion
 * callback would otherwise have released. */
104 BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
105 aborted_io_req->xid);
107 spin_lock_bh(&tgt->tgt_lock);
108 kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
109 spin_unlock_bh(&tgt->tgt_lock);
/*
 * bnx2fc_l2_els_compl() - completion handler for L2 (libfc-originated)
 * ELS requests (ADISC/LOGO/RLS).
 * @cb_arg: carries the ELS command (io_req) and the OX_ID libfc used
 *          for the original L2 exchange (l2_oxid).
 *
 * Rebuilds a contiguous frame (FC header + response payload) from the
 * middle-path response buffers and hands it back to the L2 path via
 * bnx2fc_process_l2_frame_compl() so libfc can complete the exchange.
 * A timed-out ELS is instead dequeued and cleaned up with the firmware.
 *
 * NOTE(review): elided view — allocation-failure and oversize-response
 * early exits are implied but their branch bodies are not fully visible.
 */
114 static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
116 struct bnx2fc_cmd *els_req;
117 struct bnx2fc_rport *tgt;
118 struct bnx2fc_mp_req *mp_req;
119 struct fc_frame_header *fc_hdr;
122 u32 resp_len, hdr_len;
127 l2_oxid = cb_arg->l2_oxid;
128 BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
130 els_req = cb_arg->io_req;
/* Timed-out ELS: drop this completion; libfc owns timeout recovery. */
131 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
133 * els req is timed out. cleanup the IO with FW and
134 * drop the completion. libfc will handle the els timeout
136 if (els_req->on_active_queue) {
137 list_del_init(&els_req->link);
138 els_req->on_active_queue = 0;
139 rc = bnx2fc_initiate_cleanup(els_req);
/* Gather the middle-path response (header kept separate from payload). */
146 mp_req = &(els_req->mp_req);
147 fc_hdr = &(mp_req->resp_fc_hdr);
148 resp_len = mp_req->resp_len;
149 resp_buf = mp_req->resp_buf;
151 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
153 printk(KERN_ERR PFX "Unable to alloc mp buf\n");
/* Bound check: header + payload must fit in the single page above. */
156 hdr_len = sizeof(*fc_hdr);
157 if (hdr_len + resp_len > PAGE_SIZE) {
158 printk(KERN_ERR PFX "l2_els_compl: resp len is "
159 "beyond page size\n");
/* Flatten header + payload into one buffer for the L2 receive path. */
162 memcpy(buf, fc_hdr, hdr_len);
163 memcpy(buf + hdr_len, resp_buf, resp_len);
164 frame_len = hdr_len + resp_len;
166 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
/*
 * bnx2fc_send_adisc() - offload an ADISC ELS that libfc built in @fp.
 * @tgt: destination rport
 * @fp:  frame whose payload was already initialized by libfc
 *
 * Saves libfc's OX_ID in cb_arg so bnx2fc_l2_els_compl() can route the
 * response back to the original L2 exchange; the ELS timeout is set to
 * 2 * R_A_TOV.
 */
174 int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
176 struct fc_els_adisc *adisc;
177 struct fc_frame_header *fh;
178 struct bnx2fc_els_cb_arg *cb_arg;
179 struct fc_lport *lport = tgt->rdata->local_port;
180 u32 r_a_tov = lport->r_a_tov;
183 fh = fc_frame_header_get(fp);
184 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
186 printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
/* Remember libfc's exchange id so the response completes it. */
190 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
192 BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
193 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
194 /* adisc is initialized by libfc */
195 rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
196 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
/*
 * bnx2fc_send_logo() - offload a LOGO ELS that libfc built in @fp.
 * @tgt: destination rport
 * @fp:  frame whose payload was already initialized by libfc
 *
 * Mirrors bnx2fc_send_adisc(): records libfc's OX_ID for the L2
 * completion handler and issues the ELS with a 2 * R_A_TOV timeout.
 */
202 int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
204 struct fc_els_logo *logo;
205 struct fc_frame_header *fh;
206 struct bnx2fc_els_cb_arg *cb_arg;
207 struct fc_lport *lport = tgt->rdata->local_port;
208 u32 r_a_tov = lport->r_a_tov;
211 fh = fc_frame_header_get(fp);
212 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
214 printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
/* Remember libfc's exchange id so the response completes it. */
218 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
220 BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
221 logo = fc_frame_payload_get(fp, sizeof(*logo));
222 /* logo is initialized by libfc */
223 rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
224 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
230 int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
232 struct fc_els_rls *rls;
233 struct fc_frame_header *fh;
234 struct bnx2fc_els_cb_arg *cb_arg;
235 struct fc_lport *lport = tgt->rdata->local_port;
236 u32 r_a_tov = lport->r_a_tov;
239 fh = fc_frame_header_get(fp);
240 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
242 printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
246 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
248 rls = fc_frame_payload_get(fp, sizeof(*rls));
249 /* rls is initialized by libfc */
250 rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
251 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
/*
 * bnx2fc_srr_compl() - completion handler for a Sequence Retransmission
 * Request ELS sent on behalf of a stalled IO.
 * @cb_arg: carries the SRR command (io_req) and the original IO
 *          (aborted_io_req) whose sequence is being retransmitted.
 *
 * Timeout path: abort the SRR itself, then either retry the SRR (up to
 * SRR_RETRY_COUNT, dropping tgt_lock around the resend) or abort the
 * original IO.  Response path: rebuild the frame from the middle-path
 * buffers; LS_ACC means the SRR succeeded, anything else aborts the
 * original IO.  A final kref_put releases the hold on the original IO.
 *
 * NOTE(review): elided view — some branch bodies, gotos and the final
 * unlock/free sequence are not visible; ordering below is as shown.
 */
257 void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
259 struct bnx2fc_mp_req *mp_req;
260 struct fc_frame_header *fc_hdr, *fh;
261 struct bnx2fc_cmd *srr_req;
262 struct bnx2fc_cmd *orig_io_req;
266 u32 resp_len, hdr_len;
270 orig_io_req = cb_arg->aborted_io_req;
271 srr_req = cb_arg->io_req;
/* SRR ELS timed out: abort it (cleanup if the abort cannot be sent). */
272 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
274 BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
277 rc = bnx2fc_initiate_abts(srr_req);
279 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
280 "failed. issue cleanup\n");
281 bnx2fc_initiate_cleanup(srr_req);
/* Nothing to retry if the original IO already completed or is
 * being aborted. */
283 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
284 test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
285 BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
286 orig_io_req->xid, orig_io_req->req_flags);
/* Retry the SRR; bnx2fc_send_srr() must be called without tgt_lock. */
289 orig_io_req->srr_retry++;
290 if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
291 struct bnx2fc_rport *tgt = orig_io_req->tgt;
292 spin_unlock_bh(&tgt->tgt_lock);
293 rc = bnx2fc_send_srr(orig_io_req,
294 orig_io_req->srr_offset,
295 orig_io_req->srr_rctl);
296 spin_lock_bh(&tgt->tgt_lock);
/* Retries exhausted (or resend failed): abort the original IO. */
301 rc = bnx2fc_initiate_abts(orig_io_req);
303 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
304 "failed xid = 0x%x. issue cleanup\n",
306 bnx2fc_initiate_cleanup(orig_io_req);
/* Response received: ignore it if the original IO is already done
 * or being aborted. */
310 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
311 test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
312 BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
313 orig_io_req->xid, orig_io_req->req_flags);
/* Rebuild a contiguous frame (header + payload) to parse the reply. */
316 mp_req = &(srr_req->mp_req);
317 fc_hdr = &(mp_req->resp_fc_hdr);
318 resp_len = mp_req->resp_len;
319 resp_buf = mp_req->resp_buf;
321 hdr_len = sizeof(*fc_hdr);
322 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
324 printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
327 memcpy(buf, fc_hdr, hdr_len);
328 memcpy(buf + hdr_len, resp_buf, resp_len);
330 fp = fc_frame_alloc(NULL, resp_len);
332 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
336 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
337 /* Copy FC Frame header and payload into the frame */
338 memcpy(fh, buf, hdr_len + resp_len);
340 opcode = fc_frame_payload_op(fp);
343 BNX2FC_IO_DBG(srr_req, "SRR success\n");
/* Target rejected the SRR: recover by aborting the original IO. */
346 BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
347 rc = bnx2fc_initiate_abts(orig_io_req);
349 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
350 "failed xid = 0x%x. issue cleanup\n",
352 bnx2fc_initiate_cleanup(orig_io_req);
356 BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
/* Drop the hold taken on the original IO when the SRR was sent. */
364 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
/*
 * bnx2fc_rec_compl() - completion handler for a Read Exchange Concise
 * (REC) ELS sent to query the state of a stalled IO.
 * @cb_arg: carries the REC command (io_req) and the original IO
 *          (aborted_io_req) being queried.
 *
 * Timeout path: abort the REC, then retry it (up to REC_RETRY_COUNT,
 * dropping tgt_lock around the resend) or abort the original IO.
 * LS_RJT with a logical-busy/unable + OXID/RXID explanation means the
 * target never saw the command ("CMD LOST"): clean up the original IO
 * with the firmware and repost a fresh IO for the same scsi_cmnd.
 * LS_ACC: inspect e_stat/offset to decide whether the response, data or
 * XFER_RDY was lost, then either send an SRR or run a sequence cleanup
 * at the appropriate offset/R_CTL.  A final kref_put releases the hold
 * on the original IO.
 *
 * NOTE(review): elided view — several branch bodies, gotos and the
 * final unlock/free sequence are missing; comments describe only the
 * visible control flow.
 */
367 void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
369 struct bnx2fc_cmd *orig_io_req, *new_io_req;
370 struct bnx2fc_cmd *rec_req;
371 struct bnx2fc_mp_req *mp_req;
372 struct fc_frame_header *fc_hdr, *fh;
373 struct fc_els_ls_rjt *rjt;
374 struct fc_els_rec_acc *acc;
375 struct bnx2fc_rport *tgt;
376 struct fcoe_err_report_entry *err_entry;
377 struct scsi_cmnd *sc_cmd;
385 u32 resp_len, hdr_len;
387 bool send_seq_clnp = false;
388 bool abort_io = false;
390 BNX2FC_MISC_DBG("Entered rec_compl callback\n");
391 rec_req = cb_arg->io_req;
392 orig_io_req = cb_arg->aborted_io_req;
393 BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
394 tgt = orig_io_req->tgt;
396 /* Handle REC timeout case */
397 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
398 BNX2FC_IO_DBG(rec_req, "timed out, abort "
401 /* els req is timed out. send abts for els */
402 rc = bnx2fc_initiate_abts(rec_req);
404 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
405 "failed. issue cleanup\n");
406 bnx2fc_initiate_cleanup(rec_req);
/* Retry the REC; bnx2fc_send_rec() must be called without tgt_lock. */
408 orig_io_req->rec_retry++;
409 /* REC timedout. send ABTS to the orig IO req */
410 if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
411 spin_unlock_bh(&tgt->tgt_lock);
412 rc = bnx2fc_send_rec(orig_io_req);
413 spin_lock_bh(&tgt->tgt_lock);
/* Retries exhausted: abort the original IO. */
417 rc = bnx2fc_initiate_abts(orig_io_req);
419 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
420 "failed xid = 0x%x. issue cleanup\n",
422 bnx2fc_initiate_cleanup(orig_io_req);
/* Response received: skip processing if the original IO has already
 * completed or an abort is in progress. */
427 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
428 BNX2FC_IO_DBG(rec_req, "completed"
433 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
434 BNX2FC_IO_DBG(rec_req, "abts in prog "
/* Rebuild a contiguous frame (header + payload) to parse the reply. */
440 mp_req = &(rec_req->mp_req);
441 fc_hdr = &(mp_req->resp_fc_hdr);
442 resp_len = mp_req->resp_len;
443 acc = resp_buf = mp_req->resp_buf;
445 hdr_len = sizeof(*fc_hdr);
447 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
449 printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
452 memcpy(buf, fc_hdr, hdr_len);
453 memcpy(buf + hdr_len, resp_buf, resp_len);
455 fp = fc_frame_alloc(NULL, resp_len);
457 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
461 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
462 /* Copy FC Frame header and payload into the frame */
463 memcpy(fh, buf, hdr_len + resp_len);
465 opcode = fc_frame_payload_op(fp);
466 if (opcode == ELS_LS_RJT) {
467 BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
468 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
/* Logical/unable + OXID_RXID explanation => target never saw the
 * command; clean it up and repost a new IO for the same sc_cmd. */
469 if ((rjt->er_reason == ELS_RJT_LOGIC ||
470 rjt->er_reason == ELS_RJT_UNAB) &&
471 rjt->er_explan == ELS_EXPL_OXID_RXID) {
472 BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
473 new_io_req = bnx2fc_cmd_alloc(tgt);
476 new_io_req->sc_cmd = orig_io_req->sc_cmd;
477 /* cleanup orig_io_req that is with the FW */
478 set_bit(BNX2FC_FLAG_CMD_LOST,
479 &orig_io_req->req_flags);
480 bnx2fc_initiate_cleanup(orig_io_req);
481 /* Post a new IO req with the same sc_cmd */
482 BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
/* bnx2fc_post_io_req() must be called without tgt_lock. */
483 spin_unlock_bh(&tgt->tgt_lock);
484 rc = bnx2fc_post_io_req(tgt, new_io_req);
485 spin_lock_bh(&tgt->tgt_lock);
488 BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
/* Any other rejection: abort the original IO. */
491 rc = bnx2fc_initiate_abts(orig_io_req);
493 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
494 "failed. issue cleanup\n");
495 bnx2fc_initiate_cleanup(orig_io_req);
497 } else if (opcode == ELS_LS_ACC) {
498 /* REVISIT: Check if the exchange is already aborted */
499 offset = ntohl(acc->reca_fc4value);
500 e_stat = ntohl(acc->reca_e_stat);
501 if (e_stat & ESB_ST_SEQ_INIT) {
502 BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
505 BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
507 /* Seq initiative is with us */
508 err_entry = (struct fcoe_err_report_entry *)
509 &orig_io_req->err_entry;
510 sc_cmd = orig_io_req->sc_cmd;
511 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
512 /* SCSI WRITE command */
/* All data accepted: only the FCP_RSP was lost. */
513 if (offset == orig_io_req->data_xfer_len) {
514 BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
516 r_ctl = FC_RCTL_DD_CMD_STATUS;
519 /* start transmitting from offset */
520 BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
521 send_seq_clnp = true;
522 r_ctl = FC_RCTL_DD_DATA_DESC;
523 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
529 /* SCSI READ command */
/* All data received per the HW error entry: FCP_RSP was lost. */
530 if (err_entry->data.rx_buf_off ==
531 orig_io_req->data_xfer_len) {
533 BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
534 r_ctl = FC_RCTL_DD_CMD_STATUS;
537 /* request retransmission from this offset */
538 send_seq_clnp = true;
539 offset = err_entry->data.rx_buf_off;
540 BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
542 r_ctl = FC_RCTL_DD_SOL_DATA;
543 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
/* abort_io path: fall back to aborting the original IO. */
549 rc = bnx2fc_initiate_abts(orig_io_req);
551 BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
552 " failed. issue cleanup\n");
553 bnx2fc_initiate_cleanup(orig_io_req);
/* No sequence cleanup pending: ask for retransmission via SRR
 * (bnx2fc_send_srr() must be called without tgt_lock). */
555 } else if (!send_seq_clnp) {
556 BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
557 spin_unlock_bh(&tgt->tgt_lock);
558 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
559 spin_lock_bh(&tgt->tgt_lock);
562 BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
/* Drop the hold taken on the original IO when the REC was sent. */
572 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
/*
 * bnx2fc_send_rec() - send a Read Exchange Concise ELS to query the
 * target's view of @orig_io_req's exchange.
 * @orig_io_req: stalled IO being queried; a reference is taken and
 *               transferred to bnx2fc_rec_compl(), and dropped here
 *               (under tgt_lock) if the ELS cannot be issued.
 *
 * Return: result of bnx2fc_initiate_els() (0 on success).
 */
576 int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
578 struct fc_els_rec rec;
579 struct bnx2fc_rport *tgt = orig_io_req->tgt;
580 struct fc_lport *lport = tgt->rdata->local_port;
581 struct bnx2fc_els_cb_arg *cb_arg = NULL;
583 u32 r_a_tov = lport->r_a_tov;
586 BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
587 memset(&rec, 0, sizeof(rec));
589 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
591 printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
/* Hold the original IO until the REC completion runs. */
595 kref_get(&orig_io_req->refcount);
597 cb_arg->aborted_io_req = orig_io_req;
/* Identify the queried exchange by its OX_ID/RX_ID. */
599 rec.rec_cmd = ELS_REC;
600 hton24(rec.rec_s_id, sid);
601 rec.rec_ox_id = htons(orig_io_req->xid);
602 rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
604 rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
605 bnx2fc_rec_compl, cb_arg,
/* ELS could not be issued: drop the hold taken above. */
609 BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
610 spin_lock_bh(&tgt->tgt_lock);
611 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
612 spin_unlock_bh(&tgt->tgt_lock);
/*
 * bnx2fc_send_srr() - send a Sequence Retransmission Request for
 * @orig_io_req starting at @offset with the given @r_ctl.
 * @orig_io_req: IO whose sequence should be retransmitted; a reference
 *               is taken and transferred to bnx2fc_srr_compl(), and
 *               dropped here (under tgt_lock) if the ELS fails.
 * @offset: relative offset to retransmit from (saved for SRR retries)
 * @r_ctl:  R_CTL of the information category being requested
 *
 * On success, sets BNX2FC_FLAG_SRR_SENT on the original IO.
 * Return: result of bnx2fc_initiate_els() (0 on success).
 */
618 int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
621 struct bnx2fc_rport *tgt = orig_io_req->tgt;
622 struct fc_lport *lport = tgt->rdata->local_port;
623 struct bnx2fc_els_cb_arg *cb_arg = NULL;
624 u32 r_a_tov = lport->r_a_tov;
627 BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
628 memset(&srr, 0, sizeof(srr));
630 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
632 printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
/* Hold the original IO until the SRR completion runs. */
636 kref_get(&orig_io_req->refcount);
638 cb_arg->aborted_io_req = orig_io_req;
640 srr.srr_op = ELS_SRR;
641 srr.srr_ox_id = htons(orig_io_req->xid);
642 srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
643 srr.srr_rel_off = htonl(offset);
644 srr.srr_r_ctl = r_ctl;
/* Remember offset/r_ctl so bnx2fc_srr_compl() can retry this SRR. */
645 orig_io_req->srr_offset = offset;
646 orig_io_req->srr_rctl = r_ctl;
648 rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
649 bnx2fc_srr_compl, cb_arg,
/* ELS could not be issued: drop the hold taken above. */
653 BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
654 spin_lock_bh(&tgt->tgt_lock);
655 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
656 spin_unlock_bh(&tgt->tgt_lock);
659 set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
/*
 * bnx2fc_initiate_els() - common path to issue any offloaded ELS.
 * @tgt:        session to send on
 * @op:         ELS opcode (ELS_RRQ, ELS_REC, ELS_SRR, ELS_ADISC, ...)
 * @data:       ELS payload (already initialized by the caller/libfc)
 * @data_len:   payload length in bytes
 * @cb_func:    completion callback invoked from bnx2fc_process_els_compl()
 * @cb_arg:     argument handed to @cb_func; io_req is filled in here
 * @timer_msec: ELS timeout in ms (0 = no timer)
 *
 * Validates rport/lport/session state, allocates a middle-path command,
 * copies the payload, builds the FC header (FCP type for SRR, ELS type
 * otherwise), initializes the task context and, under tgt_lock, queues
 * the request to the send queue and rings the doorbell.
 *
 * NOTE(review): elided view — error-return values and some unwind
 * branches are not visible; comments describe only what is shown.
 */
664 static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
665 void *data, u32 data_len,
666 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
667 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
669 struct fcoe_port *port = tgt->port;
670 struct bnx2fc_interface *interface = port->priv;
671 struct fc_rport *rport = tgt->rport;
672 struct fc_lport *lport = port->lport;
673 struct bnx2fc_cmd *els_req;
674 struct bnx2fc_mp_req *mp_req;
675 struct fc_frame_header *fc_hdr;
676 struct fcoe_task_ctx_entry *task;
677 struct fcoe_task_ctx_entry *task_page;
/* Refuse to send unless rport, link and session are all ready. */
683 rc = fc_remote_port_chkready(rport);
685 printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
689 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
690 printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
694 if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
695 (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
696 printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
700 els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
706 els_req->sc_cmd = NULL;
707 els_req->port = port;
709 els_req->cb_func = cb_func;
710 cb_arg->io_req = els_req;
711 els_req->cb_arg = cb_arg;
/* Set up middle-path request/response buffers for this command. */
713 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
714 rc = bnx2fc_init_mp_req(els_req);
716 printk(KERN_ERR PFX "ELS MP request init failed\n");
717 spin_lock_bh(&tgt->tgt_lock);
718 kref_put(&els_req->refcount, bnx2fc_cmd_release);
719 spin_unlock_bh(&tgt->tgt_lock);
727 /* Set the data_xfer_len to the size of ELS payload */
728 mp_req->req_len = data_len;
729 els_req->data_xfer_len = mp_req->req_len;
731 /* Fill ELS Payload */
/* Sanity-check the opcode range before copying the payload. */
732 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
733 memcpy(mp_req->req_buf, data, data_len);
735 printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
/* Clear callbacks so release does not invoke them, then unwind. */
736 els_req->cb_func = NULL;
737 els_req->cb_arg = NULL;
738 spin_lock_bh(&tgt->tgt_lock);
739 kref_put(&els_req->refcount, bnx2fc_cmd_release);
740 spin_unlock_bh(&tgt->tgt_lock);
748 fc_hdr = &(mp_req->req_fc_hdr);
750 did = tgt->rport->port_id;
/* SRR is an FC-4 link service carried with FC_TYPE_FCP; all other
 * ELS use the standard ELS R_CTL/type. */
754 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
755 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
756 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
758 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
759 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
760 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
762 /* Obtain exchange id */
/* Locate this xid's task-context entry in the per-page task array. */
764 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
765 index = xid % BNX2FC_TASKS_PER_PAGE;
767 /* Initialize task context for this IO request */
768 task_page = (struct fcoe_task_ctx_entry *)
769 interface->hba->task_ctx[task_idx];
770 task = &(task_page[index]);
771 bnx2fc_init_mp_task(els_req, task);
773 spin_lock_bh(&tgt->tgt_lock);
/* Re-check session state under the lock before queueing. */
775 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
776 printk(KERN_ERR PFX "initiate_els.. session not ready\n");
777 els_req->cb_func = NULL;
778 els_req->cb_arg = NULL;
779 kref_put(&els_req->refcount, bnx2fc_cmd_release);
780 spin_unlock_bh(&tgt->tgt_lock);
/* Arm the ELS timer (if requested), post to the send queue, track on
 * the els_queue, and ring the doorbell to kick the firmware. */
785 bnx2fc_cmd_timer_set(els_req, timer_msec);
786 bnx2fc_add_2_sq(tgt, xid);
788 els_req->on_active_queue = 1;
789 list_add_tail(&els_req->link, &tgt->els_queue);
792 bnx2fc_ring_doorbell(tgt);
793 spin_unlock_bh(&tgt->tgt_lock);
/*
 * bnx2fc_process_els_compl() - firmware completion handler for an
 * offloaded ELS request.
 * @els_req: the completed ELS command
 * @task:    its firmware task-context entry (holds the response header
 *           and payload length)
 * @num_rq:  number of RQ entries for this completion
 *
 * Races with the timeout path via BNX2FC_FLAG_ELS_DONE: whichever side
 * sets the bit first processes the command.  On the response path this
 * cancels the pending timer, dequeues the request, copies the response
 * FC header out of the task context (byte-swapping 64-bit words), and
 * invokes the cb_func registered by bnx2fc_initiate_els().
 */
799 void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
800 struct fcoe_task_ctx_entry *task, u8 num_rq)
802 struct bnx2fc_mp_req *mp_req;
803 struct fc_frame_header *fc_hdr;
807 BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
808 "cmd_type = %d\n", els_req->xid, els_req->cmd_type);
/* Timer context already handled this ELS: just drop our reference. */
810 if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
811 &els_req->req_flags)) {
812 BNX2FC_ELS_DBG("Timer context finished processing this "
813 "els - 0x%x\n", els_req->xid);
814 /* This IO doesn't receive cleanup completion */
815 kref_put(&els_req->refcount, bnx2fc_cmd_release);
819 /* Cancel the timeout_work, as we received the response */
820 if (cancel_delayed_work(&els_req->timeout_work))
821 kref_put(&els_req->refcount,
822 bnx2fc_cmd_release); /* drop timer hold */
824 if (els_req->on_active_queue) {
825 list_del_init(&els_req->link);
826 els_req->on_active_queue = 0;
/* Copy the response FC header (three 64-bit big-endian words) and
 * payload length out of the firmware task context. */
829 mp_req = &(els_req->mp_req);
830 fc_hdr = &(mp_req->resp_fc_hdr);
834 &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
835 hdr[0] = cpu_to_be64(temp_hdr[0]);
836 hdr[1] = cpu_to_be64(temp_hdr[1]);
837 hdr[2] = cpu_to_be64(temp_hdr[2]);
840 task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
842 /* Parse ELS response */
/* cb_arg is cleared after the callback so it cannot run twice. */
843 if ((els_req->cb_func) && (els_req->cb_arg)) {
844 els_req->cb_func(els_req->cb_arg);
845 els_req->cb_arg = NULL;
848 kref_put(&els_req->refcount, bnx2fc_cmd_release);
/*
 * bnx2fc_flogi_resp() - FIP-aware wrapper around libfc's FLOGI response
 * handler, installed by bnx2fc_elsct_send().
 *
 * If the FCoE controller has not yet granted a MAC, inspect the
 * response: an LS_RJT on a vport terminates that vport; otherwise the
 * FLOGI response is handed to fcoe_ctlr_recv_flogi() so FIP can learn
 * the granted MAC.  Once a MAC is known it is programmed via
 * fip->update_mac() before deferring to fc_lport_flogi_resp().
 */
851 static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
854 struct fcoe_ctlr *fip = arg;
855 struct fc_exch *exch = fc_seq_exch(seq);
856 struct fc_lport *lport = exch->lp;
863 mac = fr_cb(fp)->granted_mac;
864 if (is_zero_ether_addr(mac)) {
865 op = fc_frame_payload_op(fp);
867 if (op == ELS_LS_RJT) {
868 printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
869 fc_vport_terminate(lport->vport);
/* Let the FIP controller process the FLOGI and learn the MAC. */
874 fcoe_ctlr_recv_flogi(fip, lport, fp);
876 if (!is_zero_ether_addr(mac))
877 fip->update_mac(lport, mac);
/* Continue with libfc's normal FLOGI response handling. */
879 fc_lport_flogi_resp(seq, fp, lport);
/*
 * bnx2fc_logo_resp() - FIP-aware wrapper around libfc's LOGO response
 * handler for fabric logouts (installed by bnx2fc_elsct_send()).
 *
 * Clears the data-plane MAC via fip->update_mac() (zero MAC) before
 * deferring to fc_lport_logo_resp().
 */
882 static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
885 struct fcoe_ctlr *fip = arg;
886 struct fc_exch *exch = fc_seq_exch(seq);
887 struct fc_lport *lport = exch->lp;
888 static u8 zero_mac[ETH_ALEN] = { 0 };
891 fip->update_mac(lport, zero_mac);
892 fc_lport_logo_resp(seq, fp, lport);
/*
 * bnx2fc_elsct_send() - libfc elsct_send hook.
 *
 * Intercepts FLOGI responses (to learn the FIP-granted MAC) and fabric
 * LOGO responses (to clear it), substituting bnx2fc_flogi_resp() /
 * bnx2fc_logo_resp() with the fcoe_ctlr as callback arg; every other
 * ELS/CT request passes straight through to fc_elsct_send() with the
 * caller's @resp/@arg.
 */
895 struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
896 struct fc_frame *fp, unsigned int op,
897 void (*resp)(struct fc_seq *,
900 void *arg, u32 timeout)
902 struct fcoe_port *port = lport_priv(lport);
903 struct bnx2fc_interface *interface = port->priv;
904 struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
905 struct fc_frame_header *fh = fc_frame_header_get(fp);
910 return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
913 /* only hook onto fabric logouts, not port logouts */
914 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
916 return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
919 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);