[SCSI] libfcoe, fcoe: libfcoe NPIV support
drivers/scsi/libfc/fc_lport.c
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport from being reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy:
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
63
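/*
 * Illustration (editor's sketch, not part of the driver): the lock
 * hierarchy above, assuming the lport and disc mutex fields used later
 * in this file.  A greater lock may wrap a lesser one, never the reverse:
 *
 *	mutex_lock(&lport->lp_mutex);		greater (lport)
 *	mutex_lock(&lport->disc.disc_mutex);	lesser, OK under the lport lock
 *	...
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * Taking lp_mutex while disc_mutex (or an rport mutex) is already held
 * would violate the hierarchy and risk deadlock.
 */
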
64/*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine transitions between states (i.e. _enter_* functions),
78 * always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
89
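/*
 * Illustration (editor's sketch, not part of the driver): the
 * response-handling pattern described above.  The state name and the
 * _enter_ function are hypothetical placeholders; frame and error
 * handling are elided.
 *
 *	static void fc_lport_example_resp(struct fc_seq *sp,
 *					  struct fc_frame *fp, void *lp_arg)
 *	{
 *		struct fc_lport *lport = lp_arg;
 *
 *		mutex_lock(&lport->lp_mutex);
 *		if (lport->state != LPORT_ST_EXAMPLE)	check current state
 *			goto out;
 *		fc_lport_enter_example(lport);		state change under the lock
 *	out:
 *		mutex_unlock(&lport->lp_mutex);
 *	}
 */
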
90#include <linux/timer.h>
91#include <asm/unaligned.h>
92
93#include <scsi/fc/fc_gs.h>
94
95#include <scsi/libfc.h>
96#include <scsi/fc_encode.h>
97
98#include "fc_libfc.h"
99
100/* Fabric IDs to use for point-to-point mode, chosen on whims. */
101#define FC_LOCAL_PTP_FID_LO 0x010101
102#define FC_LOCAL_PTP_FID_HI 0x010102
103
104#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
105
106static void fc_lport_error(struct fc_lport *, struct fc_frame *);
107
108static void fc_lport_enter_reset(struct fc_lport *);
109static void fc_lport_enter_flogi(struct fc_lport *);
110static void fc_lport_enter_dns(struct fc_lport *);
111static void fc_lport_enter_rpn_id(struct fc_lport *);
112static void fc_lport_enter_rft_id(struct fc_lport *);
113static void fc_lport_enter_scr(struct fc_lport *);
114static void fc_lport_enter_ready(struct fc_lport *);
115static void fc_lport_enter_logo(struct fc_lport *);
116
117static const char *fc_lport_state_names[] = {
118 [LPORT_ST_DISABLED] = "disabled",
119 [LPORT_ST_FLOGI] = "FLOGI",
120 [LPORT_ST_DNS] = "dNS",
121 [LPORT_ST_RPN_ID] = "RPN_ID",
122 [LPORT_ST_RFT_ID] = "RFT_ID",
123 [LPORT_ST_SCR] = "SCR",
124 [LPORT_ST_READY] = "Ready",
125 [LPORT_ST_LOGO] = "LOGO",
126 [LPORT_ST_RESET] = "reset",
127};
128
129static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
130{
131 fc_frame_free(fp);
132 return 0;
133}
134
135/**
136 * fc_lport_rport_callback() - Event handler for rport events
137 * @lport: The lport which is receiving the event
138 * @rdata: private remote port data
139 * @event: The event that occurred
140 *
141 * Locking Note: The rport lock should not be held when calling
142 * this function.
143 */
144static void fc_lport_rport_callback(struct fc_lport *lport,
145 struct fc_rport_priv *rdata,
146 enum fc_rport_event event)
147{
148 FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
149 rdata->ids.port_id);
150
151 mutex_lock(&lport->lp_mutex);
152 switch (event) {
153 case RPORT_EV_READY:
154 if (lport->state == LPORT_ST_DNS) {
155 lport->dns_rp = rdata;
156 fc_lport_enter_rpn_id(lport);
157 } else {
158 FC_LPORT_DBG(lport, "Received an READY event "
159 "on port (%6x) for the directory "
160 "server, but the lport is not "
161 "in the DNS state, it's in the "
162 "%d state", rdata->ids.port_id,
163 lport->state);
164 lport->tt.rport_logoff(rdata);
165 }
166 break;
167 case RPORT_EV_LOGO:
168 case RPORT_EV_FAILED:
169 case RPORT_EV_STOP:
170 lport->dns_rp = NULL;
171 break;
172 case RPORT_EV_NONE:
173 break;
174 }
175 mutex_unlock(&lport->lp_mutex);
176}
177
178/**
179 * fc_lport_state() - Return a string which represents the lport's state
180 * @lport: The lport whose state is to be converted to a string
181 */
182static const char *fc_lport_state(struct fc_lport *lport)
183{
184 const char *cp;
185
186 cp = fc_lport_state_names[lport->state];
187 if (!cp)
188 cp = "unknown";
189 return cp;
190}
191
192/**
193 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
194 * @lport: The lport to attach the ptp rport to
195 * @remote_fid: The FID of the ptp rport
196 * @remote_wwpn: The WWPN of the ptp rport
197 * @remote_wwnn: The WWNN of the ptp rport
198 */
199static void fc_lport_ptp_setup(struct fc_lport *lport,
200 u32 remote_fid, u64 remote_wwpn,
201 u64 remote_wwnn)
202{
203 mutex_lock(&lport->disc.disc_mutex);
204 if (lport->ptp_rp)
205 lport->tt.rport_logoff(lport->ptp_rp);
206 lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
207 lport->ptp_rp->ids.port_name = remote_wwpn;
208 lport->ptp_rp->ids.node_name = remote_wwnn;
209 mutex_unlock(&lport->disc.disc_mutex);
210
211 lport->tt.rport_login(lport->ptp_rp);
212
213 fc_lport_enter_ready(lport);
214}
215
216void fc_get_host_port_type(struct Scsi_Host *shost)
217{
218 /* TODO - currently just NPORT */
219 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
220}
221EXPORT_SYMBOL(fc_get_host_port_type);
222
223void fc_get_host_port_state(struct Scsi_Host *shost)
224{
225 struct fc_lport *lp = shost_priv(shost);
226
227 mutex_lock(&lp->lp_mutex);
228 if (!lp->link_up)
229 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
230 else
231 switch (lp->state) {
232 case LPORT_ST_READY:
233 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
234 break;
235 default:
236 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
237 }
238 mutex_unlock(&lp->lp_mutex);
239}
240EXPORT_SYMBOL(fc_get_host_port_state);
241
242void fc_get_host_speed(struct Scsi_Host *shost)
243{
244 struct fc_lport *lport = shost_priv(shost);
245
246 fc_host_speed(shost) = lport->link_speed;
247}
248EXPORT_SYMBOL(fc_get_host_speed);
249
250struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
251{
252 struct fc_host_statistics *fcoe_stats;
253 struct fc_lport *lp = shost_priv(shost);
254 struct timespec v0, v1;
255 unsigned int cpu;
256
257 fcoe_stats = &lp->host_stats;
258 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
259
260 jiffies_to_timespec(jiffies, &v0);
261 jiffies_to_timespec(lp->boot_time, &v1);
262 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
263
264 for_each_possible_cpu(cpu) {
265 struct fcoe_dev_stats *stats;
266
267 stats = per_cpu_ptr(lp->dev_stats, cpu);
268
269 fcoe_stats->tx_frames += stats->TxFrames;
270 fcoe_stats->tx_words += stats->TxWords;
271 fcoe_stats->rx_frames += stats->RxFrames;
272 fcoe_stats->rx_words += stats->RxWords;
273 fcoe_stats->error_frames += stats->ErrorFrames;
274 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
275 fcoe_stats->fcp_input_requests += stats->InputRequests;
276 fcoe_stats->fcp_output_requests += stats->OutputRequests;
277 fcoe_stats->fcp_control_requests += stats->ControlRequests;
278 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
279 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
280 fcoe_stats->link_failure_count += stats->LinkFailureCount;
281 }
282 fcoe_stats->lip_count = -1;
283 fcoe_stats->nos_count = -1;
284 fcoe_stats->loss_of_sync_count = -1;
285 fcoe_stats->loss_of_signal_count = -1;
286 fcoe_stats->prim_seq_protocol_err_count = -1;
287 fcoe_stats->dumped_frames = -1;
288 return fcoe_stats;
289}
290EXPORT_SYMBOL(fc_get_host_stats);
291
292/*
293 * Fill in FLOGI command for request.
294 */
295static void
296fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
297 unsigned int op)
298{
299 struct fc_els_csp *sp;
300 struct fc_els_cssp *cp;
301
302 memset(flogi, 0, sizeof(*flogi));
303 flogi->fl_cmd = (u8) op;
304 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
305 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
306 sp = &flogi->fl_csp;
307 sp->sp_hi_ver = 0x20;
308 sp->sp_lo_ver = 0x20;
309 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
310 sp->sp_bb_data = htons((u16) lport->mfs);
311 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
312 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
313 if (op != ELS_FLOGI) {
314 sp->sp_features = htons(FC_SP_FT_CIRO);
315 sp->sp_tot_seq = htons(255); /* seq. we accept */
316 sp->sp_rel_off = htons(0x1f);
317 sp->sp_e_d_tov = htonl(lport->e_d_tov);
318
319 cp->cp_rdfs = htons((u16) lport->mfs);
320 cp->cp_con_seq = htons(255);
321 cp->cp_open_seq = 1;
322 }
323}
324
325/*
326 * Add a supported FC-4 type.
327 */
328static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
329{
330 __be32 *mp;
331
332 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
333 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
334}
335
336/**
337 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
338 * @lport: Fibre Channel local port receiving the RLIR
339 * @sp: current sequence in the RLIR exchange
340 * @fp: RLIR request frame
341 *
342 * Locking Note: The lport lock is expected to be held before calling
343 * this function.
344 */
345static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
346 struct fc_lport *lport)
347{
348 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
349 fc_lport_state(lport));
350
351 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
352 fc_frame_free(fp);
353}
354
355/**
356 * fc_lport_recv_echo_req() - Handle received ECHO request
357 * @lport: Fibre Channel local port receiving the ECHO
358 * @sp: current sequence in the ECHO exchange
359 * @fp: ECHO request frame
360 *
361 * Locking Note: The lport lock is expected to be held before calling
362 * this function.
363 */
364static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
365 struct fc_lport *lport)
366{
367 struct fc_frame *fp;
368 struct fc_exch *ep = fc_seq_exch(sp);
369 unsigned int len;
370 void *pp;
371 void *dp;
372 u32 f_ctl;
373
374 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
375 fc_lport_state(lport));
376
377 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
378 pp = fc_frame_payload_get(in_fp, len);
379
380 if (len < sizeof(__be32))
381 len = sizeof(__be32);
382
383 fp = fc_frame_alloc(lport, len);
384 if (fp) {
385 dp = fc_frame_payload_get(fp, len);
386 memcpy(dp, pp, len);
387 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
388 sp = lport->tt.seq_start_next(sp);
389 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
390 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
391 FC_TYPE_ELS, f_ctl, 0);
392 lport->tt.seq_send(lport, sp, fp);
393 }
394 fc_frame_free(in_fp);
395}
396
397/**
398 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
399 * @sp: The sequence in the RNID exchange
400 * @fp: The RNID request frame
401 * @lport: The local port receiving the RNID
402 *
403 * Locking Note: The lport lock is expected to be held before calling
404 * this function.
405 */
406static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
407 struct fc_lport *lport)
408{
409 struct fc_frame *fp;
410 struct fc_exch *ep = fc_seq_exch(sp);
411 struct fc_els_rnid *req;
412 struct {
413 struct fc_els_rnid_resp rnid;
414 struct fc_els_rnid_cid cid;
415 struct fc_els_rnid_gen gen;
416 } *rp;
417 struct fc_seq_els_data rjt_data;
418 u8 fmt;
419 size_t len;
420 u32 f_ctl;
421
422 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
423 fc_lport_state(lport));
424
425 req = fc_frame_payload_get(in_fp, sizeof(*req));
426 if (!req) {
427 rjt_data.fp = NULL;
428 rjt_data.reason = ELS_RJT_LOGIC;
429 rjt_data.explan = ELS_EXPL_NONE;
430 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
431 } else {
432 fmt = req->rnid_fmt;
433 len = sizeof(*rp);
434 if (fmt != ELS_RNIDF_GEN ||
435 ntohl(lport->rnid_gen.rnid_atype) == 0) {
436 fmt = ELS_RNIDF_NONE; /* nothing to provide */
437 len -= sizeof(rp->gen);
438 }
439 fp = fc_frame_alloc(lport, len);
440 if (fp) {
441 rp = fc_frame_payload_get(fp, len);
442 memset(rp, 0, len);
443 rp->rnid.rnid_cmd = ELS_LS_ACC;
444 rp->rnid.rnid_fmt = fmt;
445 rp->rnid.rnid_cid_len = sizeof(rp->cid);
446 rp->cid.rnid_wwpn = htonll(lport->wwpn);
447 rp->cid.rnid_wwnn = htonll(lport->wwnn);
448 if (fmt == ELS_RNIDF_GEN) {
449 rp->rnid.rnid_sid_len = sizeof(rp->gen);
450 memcpy(&rp->gen, &lport->rnid_gen,
451 sizeof(rp->gen));
452 }
453 sp = lport->tt.seq_start_next(sp);
454 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
455 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
456 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
457 FC_TYPE_ELS, f_ctl, 0);
458 lport->tt.seq_send(lport, sp, fp);
459 }
460 }
461 fc_frame_free(in_fp);
462}
463
464/**
465 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
466 * @lport: Fibre Channel local port receiving the LOGO
467 * @sp: current sequence in the LOGO exchange
468 * @fp: LOGO request frame
469 *
470 * Locking Note: The lport lock is expected to be held before calling
471 * this function.
472 */
473static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
474 struct fc_lport *lport)
475{
476 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
477 fc_lport_enter_reset(lport);
478 fc_frame_free(fp);
479}
480
481/**
482 * fc_fabric_login() - Start the lport state machine
483 * @lport: The lport that should log into the fabric
484 *
485 * Locking Note: This function should not be called
486 * with the lport lock held.
487 */
488int fc_fabric_login(struct fc_lport *lport)
489{
490 int rc = -1;
491
492 mutex_lock(&lport->lp_mutex);
493 if (lport->state == LPORT_ST_DISABLED) {
494 fc_lport_enter_reset(lport);
495 rc = 0;
496 }
497 mutex_unlock(&lport->lp_mutex);
498
499 return rc;
500}
501EXPORT_SYMBOL(fc_fabric_login);
502
503/**
504 * __fc_linkup() - Handler for transport linkup events
505 * @lport: The lport whose link is up
506 *
507 * Locking: must be called with the lp_mutex held
508 */
509void __fc_linkup(struct fc_lport *lport)
510{
511 if (!lport->link_up) {
512 lport->link_up = 1;
513
514 if (lport->state == LPORT_ST_RESET)
515 fc_lport_enter_flogi(lport);
516 }
517}
518
519/**
520 * fc_linkup() - Handler for transport linkup events
521 * @lport: The lport whose link is up
522 */
523void fc_linkup(struct fc_lport *lport)
524{
525 printk(KERN_INFO "libfc: Link up on port (%6x)\n",
526 fc_host_port_id(lport->host));
527
528 mutex_lock(&lport->lp_mutex);
529 __fc_linkup(lport);
530 mutex_unlock(&lport->lp_mutex);
531}
532EXPORT_SYMBOL(fc_linkup);
533
534/**
535 * __fc_linkdown() - Handler for transport linkdown events
536 * @lport: The lport whose link is down
537 *
538 * Locking: must be called with the lp_mutex held
539 */
540void __fc_linkdown(struct fc_lport *lport)
541{
542 if (lport->link_up) {
543 lport->link_up = 0;
544 fc_lport_enter_reset(lport);
545 lport->tt.fcp_cleanup(lport);
546 }
547}
548
549/**
550 * fc_linkdown() - Handler for transport linkdown events
551 * @lport: The lport whose link is down
552 */
553void fc_linkdown(struct fc_lport *lport)
554{
555 printk(KERN_INFO "libfc: Link down on port (%6x)\n",
556 fc_host_port_id(lport->host));
557
558 mutex_lock(&lport->lp_mutex);
559 __fc_linkdown(lport);
560 mutex_unlock(&lport->lp_mutex);
561}
562EXPORT_SYMBOL(fc_linkdown);
563
564/**
565 * fc_fabric_logoff() - Logout of the fabric
566 * @lport: fc_lport pointer to logoff the fabric
567 *
568 * Return value:
569 * 0 for success, -1 for failure
570 */
571int fc_fabric_logoff(struct fc_lport *lport)
572{
573 lport->tt.disc_stop_final(lport);
574 mutex_lock(&lport->lp_mutex);
575 if (lport->dns_rp)
576 lport->tt.rport_logoff(lport->dns_rp);
577 mutex_unlock(&lport->lp_mutex);
578 lport->tt.rport_flush_queue();
579 mutex_lock(&lport->lp_mutex);
580 fc_lport_enter_logo(lport);
581 mutex_unlock(&lport->lp_mutex);
582 cancel_delayed_work_sync(&lport->retry_work);
583 return 0;
584}
585EXPORT_SYMBOL(fc_fabric_logoff);
586
587/**
588 * fc_lport_destroy() - unregister a fc_lport
589 * @lport: fc_lport pointer to unregister
590 *
591 * Return value:
592 * None
593 * Note:
594 * exit routine for fc_lport instance
595 * clean-up all the allocated memory
596 * and free up other system resources.
597 *
598 */
599int fc_lport_destroy(struct fc_lport *lport)
600{
601 mutex_lock(&lport->lp_mutex);
602 lport->state = LPORT_ST_DISABLED;
603 lport->link_up = 0;
604 lport->tt.frame_send = fc_frame_drop;
605 mutex_unlock(&lport->lp_mutex);
606
607 lport->tt.fcp_abort_io(lport);
608 lport->tt.disc_stop_final(lport);
609 lport->tt.exch_mgr_reset(lport, 0, 0);
610 return 0;
611}
612EXPORT_SYMBOL(fc_lport_destroy);
613
614/**
615 * fc_set_mfs() - sets up the mfs for the corresponding fc_lport
616 * @lport: fc_lport pointer to update
617 * @mfs: the new mfs for fc_lport
618 *
619 * Set mfs for the given fc_lport to the new mfs.
620 *
621 * Return: 0 for success
622 */
623int fc_set_mfs(struct fc_lport *lport, u32 mfs)
624{
625 unsigned int old_mfs;
626 int rc = -EINVAL;
627
628 mutex_lock(&lport->lp_mutex);
629
630 old_mfs = lport->mfs;
631
632 if (mfs >= FC_MIN_MAX_FRAME) {
633 mfs &= ~3;
634 if (mfs > FC_MAX_FRAME)
635 mfs = FC_MAX_FRAME;
636 mfs -= sizeof(struct fc_frame_header);
637 lport->mfs = mfs;
638 rc = 0;
639 }
640
641 if (!rc && mfs < old_mfs)
642 fc_lport_enter_reset(lport);
643
644 mutex_unlock(&lport->lp_mutex);
645
646 return rc;
647}
648EXPORT_SYMBOL(fc_set_mfs);
649
650/**
651 * fc_lport_disc_callback() - Callback for discovery events
652 * @lport: FC local port
653 * @event: The discovery event
654 */
655void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
656{
657 switch (event) {
658 case DISC_EV_SUCCESS:
659 FC_LPORT_DBG(lport, "Discovery succeeded\n");
660 break;
661 case DISC_EV_FAILED:
662 printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
663 fc_host_port_id(lport->host));
664 mutex_lock(&lport->lp_mutex);
665 fc_lport_enter_reset(lport);
666 mutex_unlock(&lport->lp_mutex);
667 break;
668 case DISC_EV_NONE:
669 WARN_ON(1);
670 break;
671 }
672}
673
674/**
675 * fc_lport_enter_ready() - Enter the ready state and start discovery
676 * @lport: Fibre Channel local port that is ready
677 *
678 * Locking Note: The lport lock is expected to be held before calling
679 * this routine.
680 */
681static void fc_lport_enter_ready(struct fc_lport *lport)
682{
683 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
684 fc_lport_state(lport));
685
686 fc_lport_state_enter(lport, LPORT_ST_READY);
687 if (lport->vport)
688 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
689 fc_vports_linkchange(lport);
690
691 if (!lport->ptp_rp)
692 lport->tt.disc_start(fc_lport_disc_callback, lport);
693}
694
695/**
696 * fc_lport_recv_flogi_req() - Receive a FLOGI request
697 * @sp_in: The sequence the FLOGI is on
698 * @rx_fp: The frame the FLOGI is in
699 * @lport: The lport that received the request
700 *
701 * A received FLOGI request indicates a point-to-point connection.
702 * Accept it with the common service parameters indicating our N port.
703 * Set up to do a PLOGI if we have the higher-number WWPN.
704 *
705 * Locking Note: The lport lock is expected to be held before calling
706 * this function.
707 */
708static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
709 struct fc_frame *rx_fp,
710 struct fc_lport *lport)
711{
712 struct fc_frame *fp;
713 struct fc_frame_header *fh;
714 struct fc_seq *sp;
715 struct fc_exch *ep;
716 struct fc_els_flogi *flp;
717 struct fc_els_flogi *new_flp;
718 u64 remote_wwpn;
719 u32 remote_fid;
720 u32 local_fid;
721 u32 f_ctl;
722
723 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
724 fc_lport_state(lport));
725
726 fh = fc_frame_header_get(rx_fp);
727 remote_fid = ntoh24(fh->fh_s_id);
728 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
729 if (!flp)
730 goto out;
731 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
732 if (remote_wwpn == lport->wwpn) {
733 printk(KERN_WARNING "libfc: Received FLOGI from port "
734 "with same WWPN %llx\n", remote_wwpn);
735 goto out;
736 }
737 FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
738
739 /*
740 * XXX what is the right thing to do for FIDs?
741 * The originator might expect our S_ID to be 0xfffffe.
742 * But if so, both of us could end up with the same FID.
743 */
744 local_fid = FC_LOCAL_PTP_FID_LO;
745 if (remote_wwpn < lport->wwpn) {
746 local_fid = FC_LOCAL_PTP_FID_HI;
747 if (!remote_fid || remote_fid == local_fid)
748 remote_fid = FC_LOCAL_PTP_FID_LO;
749 } else if (!remote_fid) {
750 remote_fid = FC_LOCAL_PTP_FID_HI;
751 }
752
753 fc_host_port_id(lport->host) = local_fid;
754
755 fp = fc_frame_alloc(lport, sizeof(*flp));
756 if (fp) {
757 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
758 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
759 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
760 new_flp->fl_cmd = (u8) ELS_LS_ACC;
761
762 /*
763 * Send the response. If this fails, the originator should
764 * repeat the sequence.
765 */
766 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
767 ep = fc_seq_exch(sp);
768 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
769 FC_TYPE_ELS, f_ctl, 0);
770 lport->tt.seq_send(lport, sp, fp);
771
772 } else {
773 fc_lport_error(lport, fp);
774 }
775 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
776 get_unaligned_be64(&flp->fl_wwnn));
777
778out:
779 sp = fr_seq(rx_fp);
780 fc_frame_free(rx_fp);
781}
782
783/**
784 * fc_lport_recv_req() - The generic lport request handler
785 * @lport: The lport that received the request
786 * @sp: The sequence the request is on
787 * @fp: The frame the request is in
788 *
789 * This function will see if the lport handles the request or
790 * if an rport should handle the request.
791 *
792 * Locking Note: This function should not be called with the lport
793 * lock held because it will grab the lock.
794 */
795static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
796 struct fc_frame *fp)
797{
798 struct fc_frame_header *fh = fc_frame_header_get(fp);
799 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
800
801 mutex_lock(&lport->lp_mutex);
802
803 /*
804 * Handle special ELS cases like FLOGI, LOGO, and
805 * RSCN here. These don't require a session.
806 * Even if we had a session, it might not be ready.
807 */
808 if (!lport->link_up)
809 fc_frame_free(fp);
810 else if (fh->fh_type == FC_TYPE_ELS &&
811 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
812 /*
813 * Check opcode.
814 */
815 recv = lport->tt.rport_recv_req;
816 switch (fc_frame_payload_op(fp)) {
817 case ELS_FLOGI:
818 recv = fc_lport_recv_flogi_req;
819 break;
820 case ELS_LOGO:
821 fh = fc_frame_header_get(fp);
822 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
823 recv = fc_lport_recv_logo_req;
824 break;
825 case ELS_RSCN:
826 recv = lport->tt.disc_recv_req;
827 break;
828 case ELS_ECHO:
829 recv = fc_lport_recv_echo_req;
830 break;
831 case ELS_RLIR:
832 recv = fc_lport_recv_rlir_req;
833 break;
834 case ELS_RNID:
835 recv = fc_lport_recv_rnid_req;
836 break;
837 }
838
839 recv(sp, fp, lport);
840 } else {
841 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
842 fr_eof(fp));
843 fc_frame_free(fp);
844 }
845 mutex_unlock(&lport->lp_mutex);
846
847 /*
848 * The common exch_done for all request may not be good
849 * if any request requires longer hold on exhange. XXX
850 */
851 lport->tt.exch_done(sp);
852}
853
854/**
855 * fc_lport_reset() - Reset an lport
856 * @lport: The lport which should be reset
857 *
858 * Locking Note: This function should not be called with the
859 * lport lock held.
860 */
861int fc_lport_reset(struct fc_lport *lport)
862{
863 cancel_delayed_work_sync(&lport->retry_work);
864 mutex_lock(&lport->lp_mutex);
865 fc_lport_enter_reset(lport);
866 mutex_unlock(&lport->lp_mutex);
867 return 0;
868}
869EXPORT_SYMBOL(fc_lport_reset);
870
871/**
872 * fc_lport_reset_locked() - Reset the local port
873 * @lport: Fibre Channel local port to be reset
874 *
875 * Locking Note: The lport lock is expected to be held before calling
876 * this routine.
877 */
878static void fc_lport_reset_locked(struct fc_lport *lport)
879{
880 if (lport->dns_rp)
881 lport->tt.rport_logoff(lport->dns_rp);
882
883 lport->ptp_rp = NULL;
884
885 lport->tt.disc_stop(lport);
886
1f6ff364 887 lport->tt.exch_mgr_reset(lport, 0, 0);
42e9a92f
RL
888 fc_host_fabric_name(lport->host) = 0;
889 fc_host_port_id(lport->host) = 0;
890}
891
892/**
893 * fc_lport_enter_reset() - Reset the local port
894 * @lport: Fibre Channel local port to be reset
895 *
896 * Locking Note: The lport lock is expected to be held before calling
897 * this routine.
898 */
899static void fc_lport_enter_reset(struct fc_lport *lport)
900{
901 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
902 fc_lport_state(lport));
903
904 if (lport->vport) {
905 if (lport->link_up)
906 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
907 else
908 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
909 }
910 fc_lport_state_enter(lport, LPORT_ST_RESET);
911 fc_vports_linkchange(lport);
912 fc_lport_reset_locked(lport);
913 if (lport->link_up)
914 fc_lport_enter_flogi(lport);
915}
916
917/**
918 * fc_lport_enter_disabled() - disable the local port
919 * @lport: Fibre Channel local port to be reset
920 *
921 * Locking Note: The lport lock is expected to be held before calling
922 * this routine.
923 */
924static void fc_lport_enter_disabled(struct fc_lport *lport)
925{
926 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
927 fc_lport_state(lport));
928
929 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
930 fc_vports_linkchange(lport);
931 fc_lport_reset_locked(lport);
932}
933
934/**
935 * fc_lport_error() - Handler for any errors
936 * @lport: The fc_lport object
937 * @fp: The frame pointer
938 *
939 * If the error was caused by a resource allocation failure
940 * then wait for half a second and retry, otherwise retry
941 * after the e_d_tov time.
942 */
943static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
944{
945 unsigned long delay = 0;
946 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
947 PTR_ERR(fp), fc_lport_state(lport),
948 lport->retry_count);
949
950 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
951 /*
952 * Memory allocation failure, or the exchange timed out.
953 * Retry after delay
954 */
955 if (lport->retry_count < lport->max_retry_count) {
956 lport->retry_count++;
957 if (!fp)
958 delay = msecs_to_jiffies(500);
959 else
960 delay = msecs_to_jiffies(lport->e_d_tov);
961
962 schedule_delayed_work(&lport->retry_work, delay);
963 } else {
964 switch (lport->state) {
965 case LPORT_ST_DISABLED:
966 case LPORT_ST_READY:
967 case LPORT_ST_RESET:
968 case LPORT_ST_RPN_ID:
969 case LPORT_ST_RFT_ID:
970 case LPORT_ST_SCR:
971 case LPORT_ST_DNS:
972 case LPORT_ST_FLOGI:
973 case LPORT_ST_LOGO:
974 fc_lport_enter_reset(lport);
975 break;
976 }
977 }
978 }
979}
980
981/**
982 * fc_lport_rft_id_resp() - Handle response to Register Fibre
983 * Channel Types by ID (RFT_ID) request
984 * @sp: current sequence in RFT_ID exchange
985 * @fp: response frame
986 * @lp_arg: Fibre Channel host port instance
987 *
988 * Locking Note: This function will be called without the lport lock
989 * held, but it will lock, call an _enter_* function or fc_lport_error
990 * and then unlock the lport.
991 */
992static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
993 void *lp_arg)
994{
995 struct fc_lport *lport = lp_arg;
996 struct fc_frame_header *fh;
997 struct fc_ct_hdr *ct;
998
999 FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp));
1000
1001 if (fp == ERR_PTR(-FC_EX_CLOSED))
1002 return;
1003
1004 mutex_lock(&lport->lp_mutex);
1005
1006 if (lport->state != LPORT_ST_RFT_ID) {
1007 FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
1008 "%s\n", fc_lport_state(lport));
1009 if (IS_ERR(fp))
1010 goto err;
1011 goto out;
1012 }
1013
1014 if (IS_ERR(fp)) {
1015 fc_lport_error(lport, fp);
1016 goto err;
1017 }
1018
1019 fh = fc_frame_header_get(fp);
1020 ct = fc_frame_payload_get(fp, sizeof(*ct));
1021
1022 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1023 ct->ct_fs_type == FC_FST_DIR &&
1024 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1025 ntohs(ct->ct_cmd) == FC_FS_ACC)
1026 fc_lport_enter_scr(lport);
1027 else
1028 fc_lport_error(lport, fp);
1029out:
1030 fc_frame_free(fp);
1031err:
1032 mutex_unlock(&lport->lp_mutex);
1033}
1034
1035/**
1036 * fc_lport_rpn_id_resp() - Handle response to Register Port
1037 * Name by ID (RPN_ID) request
1038 * @sp: current sequence in RPN_ID exchange
1039 * @fp: response frame
1040 * @lp_arg: Fibre Channel host port instance
1041 *
1042 * Locking Note: This function will be called without the lport lock
1043 * held, but it will lock, call an _enter_* function or fc_lport_error
1044 * and then unlock the lport.
1045 */
1046static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1047 void *lp_arg)
1048{
1049 struct fc_lport *lport = lp_arg;
1050 struct fc_frame_header *fh;
1051 struct fc_ct_hdr *ct;
1052
1053 FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp));
1054
1055 if (fp == ERR_PTR(-FC_EX_CLOSED))
1056 return;
1057
1058 mutex_lock(&lport->lp_mutex);
1059
1060 if (lport->state != LPORT_ST_RPN_ID) {
1061 FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
1062 "%s\n", fc_lport_state(lport));
1063 if (IS_ERR(fp))
1064 goto err;
1065 goto out;
1066 }
1067
1068 if (IS_ERR(fp)) {
1069 fc_lport_error(lport, fp);
1070 goto err;
1071 }
1072
1073 fh = fc_frame_header_get(fp);
1074 ct = fc_frame_payload_get(fp, sizeof(*ct));
1075 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1076 ct->ct_fs_type == FC_FST_DIR &&
1077 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1078 ntohs(ct->ct_cmd) == FC_FS_ACC)
1079 fc_lport_enter_rft_id(lport);
1080 else
1081 fc_lport_error(lport, fp);
1082
1083out:
1084 fc_frame_free(fp);
1085err:
1086 mutex_unlock(&lport->lp_mutex);
1087}
1088
1089/**
1090 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1091 * @sp: current sequence in SCR exchange
1092 * @fp: response frame
1093 * @lp_arg: Fibre Channel lport port instance that sent the registration request
1094 *
1095 * Locking Note: This function will be called without the lport lock
1096 * held, but it will lock, call an _enter_* function or fc_lport_error
1097 * and then unlock the lport.
1098 */
1099static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1100 void *lp_arg)
1101{
1102 struct fc_lport *lport = lp_arg;
1103 u8 op;
1104
1105 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1106
1107 if (fp == ERR_PTR(-FC_EX_CLOSED))
1108 return;
1109
1110 mutex_lock(&lport->lp_mutex);
1111
1112 if (lport->state != LPORT_ST_SCR) {
1113 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1114 "%s\n", fc_lport_state(lport));
1115 if (IS_ERR(fp))
1116 goto err;
1117 goto out;
1118 }
1119
1120 if (IS_ERR(fp)) {
1121 fc_lport_error(lport, fp);
1122 goto err;
1123 }
1124
1125 op = fc_frame_payload_op(fp);
1126 if (op == ELS_LS_ACC)
1127 fc_lport_enter_ready(lport);
1128 else
1129 fc_lport_error(lport, fp);
1130
1131out:
1132 fc_frame_free(fp);
1133err:
1134 mutex_unlock(&lport->lp_mutex);
1135}
1136
1137/**
1138 * fc_lport_enter_scr() - Send a State Change Register (SCR) request
1139 * @lport: Fibre Channel local port to register for state changes
1140 *
1141 * Locking Note: The lport lock is expected to be held before calling
1142 * this routine.
1143 */
1144static void fc_lport_enter_scr(struct fc_lport *lport)
1145{
1146 struct fc_frame *fp;
1147
1148 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1149 fc_lport_state(lport));
1150
1151 fc_lport_state_enter(lport, LPORT_ST_SCR);
1152
1153 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1154 if (!fp) {
1155 fc_lport_error(lport, fp);
1156 return;
1157 }
1158
1159 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1160 fc_lport_scr_resp, lport, lport->e_d_tov))
1161 fc_lport_error(lport, NULL);
1162}
1163
1164/**
1165 * fc_lport_enter_rft_id() - Register FC4-types with the name server
1166 * @lport: Fibre Channel local port to register
1167 *
1168 * Locking Note: The lport lock is expected to be held before calling
1169 * this routine.
1170 */
1171static void fc_lport_enter_rft_id(struct fc_lport *lport)
1172{
1173 struct fc_frame *fp;
1174 struct fc_ns_fts *lps;
1175 int i;
1176
1177 FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
1178 fc_lport_state(lport));
1179
1180 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1181
1182 lps = &lport->fcts;
1183 i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
1184 while (--i >= 0)
1185 if (ntohl(lps->ff_type_map[i]) != 0)
1186 break;
1187 if (i < 0) {
1188 /* nothing to register, move on to SCR */
1189 fc_lport_enter_scr(lport);
1190 return;
1191 }
1192
1193 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1194 sizeof(struct fc_ns_rft));
1195 if (!fp) {
1196 fc_lport_error(lport, fp);
1197 return;
1198 }
1199
1200 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
1201 fc_lport_rft_id_resp,
1202 lport, lport->e_d_tov))
1203 fc_lport_error(lport, fp);
1204}
1205
1206/**
1207 * fc_lport_enter_rpn_id() - Register port name with the name server
1208 * @lport: Fibre Channel local port to register
1209 *
1210 * Locking Note: The lport lock is expected to be held before calling
1211 * this routine.
1212 */
1213static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1214{
1215 struct fc_frame *fp;
1216
1217 FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
1218 fc_lport_state(lport));
1219
1220 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1221
1222 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1223 sizeof(struct fc_ns_rn_id));
1224 if (!fp) {
1225 fc_lport_error(lport, fp);
1226 return;
1227 }
1228
1229 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
1230 fc_lport_rpn_id_resp,
1231 lport, lport->e_d_tov))
1232 fc_lport_error(lport, NULL);
1233}
1234
1235static struct fc_rport_operations fc_lport_rport_ops = {
1236 .event_callback = fc_lport_rport_callback,
1237};
1238
1239/**
1240 * fc_lport_enter_dns() - Create an rport to the name server
1241 * @lport: Fibre Channel local port requesting a rport for the name server
1242 *
1243 * Locking Note: The lport lock is expected to be held before calling
1244 * this routine.
1245 */
1246static void fc_lport_enter_dns(struct fc_lport *lport)
1247{
1248 struct fc_rport_priv *rdata;
1249
1250 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1251 fc_lport_state(lport));
1252
1253 fc_lport_state_enter(lport, LPORT_ST_DNS);
1254
1255 mutex_lock(&lport->disc.disc_mutex);
1256 rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1257 mutex_unlock(&lport->disc.disc_mutex);
1258 if (!rdata)
1259 goto err;
1260
1261 rdata->ops = &fc_lport_rport_ops;
1262 lport->tt.rport_login(rdata);
1263 return;
1264
1265err:
1266 fc_lport_error(lport, NULL);
1267}
1268
1269/**
1270 * fc_lport_timeout() - Handler for the retry_work timer.
1271 * @work: The work struct of the fc_lport
1272 */
1273static void fc_lport_timeout(struct work_struct *work)
1274{
1275 struct fc_lport *lport =
1276 container_of(work, struct fc_lport,
1277 retry_work.work);
1278
1279 mutex_lock(&lport->lp_mutex);
1280
1281 switch (lport->state) {
1282 case LPORT_ST_DISABLED:
1283 WARN_ON(1);
1284 break;
1285 case LPORT_ST_READY:
1286 WARN_ON(1);
1287 break;
1288 case LPORT_ST_RESET:
1289 break;
1290 case LPORT_ST_FLOGI:
1291 fc_lport_enter_flogi(lport);
1292 break;
1293 case LPORT_ST_DNS:
1294 fc_lport_enter_dns(lport);
1295 break;
1296 case LPORT_ST_RPN_ID:
1297 fc_lport_enter_rpn_id(lport);
1298 break;
1299 case LPORT_ST_RFT_ID:
1300 fc_lport_enter_rft_id(lport);
1301 break;
1302 case LPORT_ST_SCR:
1303 fc_lport_enter_scr(lport);
1304 break;
1305 case LPORT_ST_LOGO:
1306 fc_lport_enter_logo(lport);
1307 break;
1308 }
1309
1310 mutex_unlock(&lport->lp_mutex);
1311}
1312
1313/**
1314 * fc_lport_logo_resp() - Handle response to LOGO request
1315 * @sp: current sequence in LOGO exchange
1316 * @fp: response frame
1317 * @lp_arg: Fibre Channel lport port instance that sent the LOGO request
1318 *
1319 * Locking Note: This function will be called without the lport lock
1320 * held, but it will lock, call an _enter_* function or fc_lport_error
1321 * and then unlock the lport.
1322 */
1323void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1324 void *lp_arg)
1325{
1326 struct fc_lport *lport = lp_arg;
1327 u8 op;
1328
1329 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1330
1331 if (fp == ERR_PTR(-FC_EX_CLOSED))
1332 return;
1333
1334 mutex_lock(&lport->lp_mutex);
1335
1336 if (lport->state != LPORT_ST_LOGO) {
1337 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1338 "%s\n", fc_lport_state(lport));
1339 if (IS_ERR(fp))
1340 goto err;
1341 goto out;
1342 }
1343
1344 if (IS_ERR(fp)) {
1345 fc_lport_error(lport, fp);
1346 goto err;
1347 }
1348
1349 op = fc_frame_payload_op(fp);
1350 if (op == ELS_LS_ACC)
1351 fc_lport_enter_disabled(lport);
1352 else
1353 fc_lport_error(lport, fp);
1354
1355out:
1356 fc_frame_free(fp);
1357err:
1358 mutex_unlock(&lport->lp_mutex);
1359}
1360EXPORT_SYMBOL(fc_lport_logo_resp);
1361
1362/**
1363 * fc_lport_enter_logo() - Logout of the fabric
1364 * @lport: Fibre Channel local port to be logged out
1365 *
1366 * Locking Note: The lport lock is expected to be held before calling
1367 * this routine.
1368 */
1369static void fc_lport_enter_logo(struct fc_lport *lport)
1370{
1371 struct fc_frame *fp;
1372 struct fc_els_logo *logo;
1373
1374 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1375 fc_lport_state(lport));
1376
1377 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1378 fc_vports_linkchange(lport);
1379
1380 fp = fc_frame_alloc(lport, sizeof(*logo));
1381 if (!fp) {
1382 fc_lport_error(lport, fp);
1383 return;
1384 }
1385
1386 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1387 fc_lport_logo_resp, lport, lport->e_d_tov))
1388 fc_lport_error(lport, NULL);
1389}
1390
1391/**
1392 * fc_lport_flogi_resp() - Handle response to FLOGI request
1393 * @sp: current sequence in FLOGI exchange
1394 * @fp: response frame
1395 * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request
1396 *
1397 * Locking Note: This function will be called without the lport lock
1398 * held, but it will lock, call an _enter_* function or fc_lport_error
1399 * and then unlock the lport.
1400 */
1401void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1402 void *lp_arg)
1403{
1404 struct fc_lport *lport = lp_arg;
1405 struct fc_frame_header *fh;
1406 struct fc_els_flogi *flp;
1407 u32 did;
1408 u16 csp_flags;
1409 unsigned int r_a_tov;
1410 unsigned int e_d_tov;
1411 u16 mfs;
1412
1413 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1414
1415 if (fp == ERR_PTR(-FC_EX_CLOSED))
1416 return;
1417
1418 mutex_lock(&lport->lp_mutex);
1419
1420 if (lport->state != LPORT_ST_FLOGI) {
1421 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1422 "%s\n", fc_lport_state(lport));
1423 if (IS_ERR(fp))
1424 goto err;
1425 goto out;
1426 }
1427
1428 if (IS_ERR(fp)) {
1429 fc_lport_error(lport, fp);
1430 goto err;
1431 }
1432
1433 fh = fc_frame_header_get(fp);
1434 did = ntoh24(fh->fh_d_id);
1435 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1436
1437 printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
1438 did);
1439 fc_host_port_id(lport->host) = did;
1440
1441 flp = fc_frame_payload_get(fp, sizeof(*flp));
1442 if (flp) {
1443 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1444 FC_SP_BB_DATA_MASK;
1445 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1446 mfs < lport->mfs)
1447 lport->mfs = mfs;
1448 csp_flags = ntohs(flp->fl_csp.sp_features);
1449 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1450 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1451 if (csp_flags & FC_SP_FT_EDTR)
1452 e_d_tov /= 1000000;
1453
1454 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1455
1456 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1457 if (e_d_tov > lport->e_d_tov)
1458 lport->e_d_tov = e_d_tov;
1459 lport->r_a_tov = 2 * e_d_tov;
1460 printk(KERN_INFO "libfc: Port (%6x) entered "
1461 "point to point mode\n", did);
1462 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1463 get_unaligned_be64(
1464 &flp->fl_wwpn),
1465 get_unaligned_be64(
1466 &flp->fl_wwnn));
1467 } else {
1468 lport->e_d_tov = e_d_tov;
1469 lport->r_a_tov = r_a_tov;
1470 fc_host_fabric_name(lport->host) =
1471 get_unaligned_be64(&flp->fl_wwnn);
1472 fc_lport_enter_dns(lport);
1473 }
1474 }
1475 } else {
1476 FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1477 }
1478
1479out:
1480 fc_frame_free(fp);
1481err:
1482 mutex_unlock(&lport->lp_mutex);
1483}
1484EXPORT_SYMBOL(fc_lport_flogi_resp);
1485
1486/**
1487 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1488 * @lport: Fibre Channel local port to be logged in to the fabric
1489 *
1490 * Locking Note: The lport lock is expected to be held before calling
1491 * this routine.
1492 */
1493void fc_lport_enter_flogi(struct fc_lport *lport)
1494{
1495 struct fc_frame *fp;
1496
1497 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1498 fc_lport_state(lport));
1499
1500 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1501
1502 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1503 if (!fp)
1504 return fc_lport_error(lport, fp);
1505
1506 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1507 lport->vport ? ELS_FDISC : ELS_FLOGI,
1508 fc_lport_flogi_resp, lport, lport->e_d_tov))
1509 fc_lport_error(lport, NULL);
1510}
1511
1512/* Configure a fc_lport */
1513int fc_lport_config(struct fc_lport *lport)
1514{
1515 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1516 mutex_init(&lport->lp_mutex);
1517
1518 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1519
1520 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1521 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1522
1523 return 0;
1524}
1525EXPORT_SYMBOL(fc_lport_config);
1526
1527int fc_lport_init(struct fc_lport *lport)
1528{
1529 if (!lport->tt.lport_recv)
1530 lport->tt.lport_recv = fc_lport_recv_req;
1531
1532 if (!lport->tt.lport_reset)
1533 lport->tt.lport_reset = fc_lport_reset;
1534
1535 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1536 fc_host_node_name(lport->host) = lport->wwnn;
1537 fc_host_port_name(lport->host) = lport->wwpn;
1538 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1539 memset(fc_host_supported_fc4s(lport->host), 0,
1540 sizeof(fc_host_supported_fc4s(lport->host)));
1541 fc_host_supported_fc4s(lport->host)[2] = 1;
1542 fc_host_supported_fc4s(lport->host)[7] = 1;
1543
1544 /* This value is also unchanging */
1545 memset(fc_host_active_fc4s(lport->host), 0,
1546 sizeof(fc_host_active_fc4s(lport->host)));
1547 fc_host_active_fc4s(lport->host)[2] = 1;
1548 fc_host_active_fc4s(lport->host)[7] = 1;
1549 fc_host_maxframe_size(lport->host) = lport->mfs;
1550 fc_host_supported_speeds(lport->host) = 0;
1551 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1552 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1553 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1554 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1555
1556 return 0;
1557}
1558EXPORT_SYMBOL(fc_lport_init);