[SCSI] libfc: fix: rport_recv_req needs disc_mutex when calling rport_lookup
[linux-2.6-block.git] / drivers / scsi / libfc / fc_lport.c
CommitLineData
42e9a92f
RL
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent from
30 * having an lport reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
 35 * HIERARCHY
 36 *
 37 * The following hierarchy defines the locking rules. A greater lock
 38 * may be held before acquiring a lesser lock, but a lesser lock should never
 39 * be held while attempting to acquire a greater lock. Here is the hierarchy-
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
 55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
 59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
63
64/*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking doesn't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lports state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine can transition between states (i.e. _enter_* functions)
78 * while always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
 82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
89
90#include <linux/timer.h>
91#include <asm/unaligned.h>
92
93#include <scsi/fc/fc_gs.h>
94
95#include <scsi/libfc.h>
96#include <scsi/fc_encode.h>
97
98/* Fabric IDs to use for point-to-point mode, chosen on whims. */
99#define FC_LOCAL_PTP_FID_LO 0x010101
100#define FC_LOCAL_PTP_FID_HI 0x010102
101
102#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
103
42e9a92f
RL
104static void fc_lport_error(struct fc_lport *, struct fc_frame *);
105
106static void fc_lport_enter_reset(struct fc_lport *);
107static void fc_lport_enter_flogi(struct fc_lport *);
108static void fc_lport_enter_dns(struct fc_lport *);
109static void fc_lport_enter_rpn_id(struct fc_lport *);
110static void fc_lport_enter_rft_id(struct fc_lport *);
111static void fc_lport_enter_scr(struct fc_lport *);
112static void fc_lport_enter_ready(struct fc_lport *);
113static void fc_lport_enter_logo(struct fc_lport *);
114
115static const char *fc_lport_state_names[] = {
b1d9fd55 116 [LPORT_ST_DISABLED] = "disabled",
42e9a92f
RL
117 [LPORT_ST_FLOGI] = "FLOGI",
118 [LPORT_ST_DNS] = "dNS",
119 [LPORT_ST_RPN_ID] = "RPN_ID",
120 [LPORT_ST_RFT_ID] = "RFT_ID",
121 [LPORT_ST_SCR] = "SCR",
122 [LPORT_ST_READY] = "Ready",
123 [LPORT_ST_LOGO] = "LOGO",
124 [LPORT_ST_RESET] = "reset",
125};
126
127static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
128{
129 fc_frame_free(fp);
130 return 0;
131}
132
133/**
34f42a07 134 * fc_lport_rport_callback() - Event handler for rport events
42e9a92f 135 * @lport: The lport which is receiving the event
9fb9d328 136 * @rdata: private remote port data
42e9a92f
RL
137 * @event: The event that occured
138 *
139 * Locking Note: The rport lock should not be held when calling
140 * this function.
141 */
142static void fc_lport_rport_callback(struct fc_lport *lport,
9fb9d328 143 struct fc_rport_priv *rdata,
42e9a92f
RL
144 enum fc_rport_event event)
145{
7414705e 146 FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
f211fa51 147 rdata->ids.port_id);
42e9a92f 148
b5cbf083 149 mutex_lock(&lport->lp_mutex);
42e9a92f 150 switch (event) {
4c0f62b5 151 case RPORT_EV_READY:
b5cbf083
JE
152 if (lport->state == LPORT_ST_DNS) {
153 lport->dns_rp = rdata;
154 fc_lport_enter_rpn_id(lport);
155 } else {
156 FC_LPORT_DBG(lport, "Received an READY event "
157 "on port (%6x) for the directory "
158 "server, but the lport is not "
159 "in the DNS state, it's in the "
160 "%d state", rdata->ids.port_id,
161 lport->state);
162 lport->tt.rport_logoff(rdata);
163 }
42e9a92f
RL
164 break;
165 case RPORT_EV_LOGO:
166 case RPORT_EV_FAILED:
167 case RPORT_EV_STOP:
b5cbf083 168 lport->dns_rp = NULL;
42e9a92f
RL
169 break;
170 case RPORT_EV_NONE:
171 break;
172 }
b5cbf083 173 mutex_unlock(&lport->lp_mutex);
42e9a92f
RL
174}
175
176/**
34f42a07 177 * fc_lport_state() - Return a string which represents the lport's state
42e9a92f
RL
178 * @lport: The lport whose state is to converted to a string
179 */
180static const char *fc_lport_state(struct fc_lport *lport)
181{
182 const char *cp;
183
184 cp = fc_lport_state_names[lport->state];
185 if (!cp)
186 cp = "unknown";
187 return cp;
188}
189
190/**
34f42a07 191 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
42e9a92f
RL
192 * @lport: The lport to attach the ptp rport to
193 * @fid: The FID of the ptp rport
194 * @remote_wwpn: The WWPN of the ptp rport
195 * @remote_wwnn: The WWNN of the ptp rport
196 */
197static void fc_lport_ptp_setup(struct fc_lport *lport,
198 u32 remote_fid, u64 remote_wwpn,
199 u64 remote_wwnn)
200{
48f00902
JE
201 mutex_lock(&lport->disc.disc_mutex);
202 if (lport->ptp_rp)
42e9a92f 203 lport->tt.rport_logoff(lport->ptp_rp);
9737e6a7
RL
204 lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
205 lport->ptp_rp->ids.port_name = remote_wwpn;
206 lport->ptp_rp->ids.node_name = remote_wwnn;
48f00902 207 mutex_unlock(&lport->disc.disc_mutex);
42e9a92f
RL
208
209 lport->tt.rport_login(lport->ptp_rp);
210
211 fc_lport_enter_ready(lport);
212}
213
214void fc_get_host_port_type(struct Scsi_Host *shost)
215{
216 /* TODO - currently just NPORT */
217 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
218}
219EXPORT_SYMBOL(fc_get_host_port_type);
220
221void fc_get_host_port_state(struct Scsi_Host *shost)
222{
223 struct fc_lport *lp = shost_priv(shost);
224
bc0e17f6 225 if (lp->link_up)
42e9a92f
RL
226 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
227 else
228 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
229}
230EXPORT_SYMBOL(fc_get_host_port_state);
231
232void fc_get_host_speed(struct Scsi_Host *shost)
233{
234 struct fc_lport *lport = shost_priv(shost);
235
236 fc_host_speed(shost) = lport->link_speed;
237}
238EXPORT_SYMBOL(fc_get_host_speed);
239
240struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
241{
42e9a92f
RL
242 struct fc_host_statistics *fcoe_stats;
243 struct fc_lport *lp = shost_priv(shost);
244 struct timespec v0, v1;
582b45bc 245 unsigned int cpu;
42e9a92f
RL
246
247 fcoe_stats = &lp->host_stats;
248 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
249
250 jiffies_to_timespec(jiffies, &v0);
251 jiffies_to_timespec(lp->boot_time, &v1);
252 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
253
582b45bc
RL
254 for_each_possible_cpu(cpu) {
255 struct fcoe_dev_stats *stats;
256
257 stats = per_cpu_ptr(lp->dev_stats, cpu);
258
42e9a92f
RL
259 fcoe_stats->tx_frames += stats->TxFrames;
260 fcoe_stats->tx_words += stats->TxWords;
261 fcoe_stats->rx_frames += stats->RxFrames;
262 fcoe_stats->rx_words += stats->RxWords;
263 fcoe_stats->error_frames += stats->ErrorFrames;
264 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
265 fcoe_stats->fcp_input_requests += stats->InputRequests;
266 fcoe_stats->fcp_output_requests += stats->OutputRequests;
267 fcoe_stats->fcp_control_requests += stats->ControlRequests;
268 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
269 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
270 fcoe_stats->link_failure_count += stats->LinkFailureCount;
271 }
272 fcoe_stats->lip_count = -1;
273 fcoe_stats->nos_count = -1;
274 fcoe_stats->loss_of_sync_count = -1;
275 fcoe_stats->loss_of_signal_count = -1;
276 fcoe_stats->prim_seq_protocol_err_count = -1;
277 fcoe_stats->dumped_frames = -1;
278 return fcoe_stats;
279}
280EXPORT_SYMBOL(fc_get_host_stats);
281
282/*
283 * Fill in FLOGI command for request.
284 */
285static void
286fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
287 unsigned int op)
288{
289 struct fc_els_csp *sp;
290 struct fc_els_cssp *cp;
291
292 memset(flogi, 0, sizeof(*flogi));
293 flogi->fl_cmd = (u8) op;
294 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
295 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
296 sp = &flogi->fl_csp;
297 sp->sp_hi_ver = 0x20;
298 sp->sp_lo_ver = 0x20;
299 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
300 sp->sp_bb_data = htons((u16) lport->mfs);
301 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
302 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
303 if (op != ELS_FLOGI) {
304 sp->sp_features = htons(FC_SP_FT_CIRO);
305 sp->sp_tot_seq = htons(255); /* seq. we accept */
306 sp->sp_rel_off = htons(0x1f);
307 sp->sp_e_d_tov = htonl(lport->e_d_tov);
308
309 cp->cp_rdfs = htons((u16) lport->mfs);
310 cp->cp_con_seq = htons(255);
311 cp->cp_open_seq = 1;
312 }
313}
314
315/*
316 * Add a supported FC-4 type.
317 */
318static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
319{
320 __be32 *mp;
321
322 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
323 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
324}
325
326/**
34f42a07 327 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
42e9a92f
RL
328 * @lport: Fibre Channel local port recieving the RLIR
329 * @sp: current sequence in the RLIR exchange
330 * @fp: RLIR request frame
331 *
332 * Locking Note: The lport lock is exected to be held before calling
333 * this function.
334 */
335static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
336 struct fc_lport *lport)
337{
7414705e
RL
338 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
339 fc_lport_state(lport));
42e9a92f
RL
340
341 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
342 fc_frame_free(fp);
343}
344
345/**
34f42a07 346 * fc_lport_recv_echo_req() - Handle received ECHO request
42e9a92f
RL
347 * @lport: Fibre Channel local port recieving the ECHO
348 * @sp: current sequence in the ECHO exchange
349 * @fp: ECHO request frame
350 *
351 * Locking Note: The lport lock is exected to be held before calling
352 * this function.
353 */
354static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
355 struct fc_lport *lport)
356{
357 struct fc_frame *fp;
358 struct fc_exch *ep = fc_seq_exch(sp);
359 unsigned int len;
360 void *pp;
361 void *dp;
362 u32 f_ctl;
363
7414705e
RL
364 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
365 fc_lport_state(lport));
42e9a92f
RL
366
367 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
368 pp = fc_frame_payload_get(in_fp, len);
369
370 if (len < sizeof(__be32))
371 len = sizeof(__be32);
372
373 fp = fc_frame_alloc(lport, len);
374 if (fp) {
375 dp = fc_frame_payload_get(fp, len);
376 memcpy(dp, pp, len);
377 *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
378 sp = lport->tt.seq_start_next(sp);
379 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
380 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
381 FC_TYPE_ELS, f_ctl, 0);
382 lport->tt.seq_send(lport, sp, fp);
383 }
384 fc_frame_free(in_fp);
385}
386
387/**
34f42a07 388 * fc_lport_recv_echo_req() - Handle received Request Node ID data request
42e9a92f
RL
389 * @lport: Fibre Channel local port recieving the RNID
390 * @sp: current sequence in the RNID exchange
391 * @fp: RNID request frame
392 *
393 * Locking Note: The lport lock is exected to be held before calling
394 * this function.
395 */
396static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
397 struct fc_lport *lport)
398{
399 struct fc_frame *fp;
400 struct fc_exch *ep = fc_seq_exch(sp);
401 struct fc_els_rnid *req;
402 struct {
403 struct fc_els_rnid_resp rnid;
404 struct fc_els_rnid_cid cid;
405 struct fc_els_rnid_gen gen;
406 } *rp;
407 struct fc_seq_els_data rjt_data;
408 u8 fmt;
409 size_t len;
410 u32 f_ctl;
411
7414705e
RL
412 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
413 fc_lport_state(lport));
42e9a92f
RL
414
415 req = fc_frame_payload_get(in_fp, sizeof(*req));
416 if (!req) {
417 rjt_data.fp = NULL;
418 rjt_data.reason = ELS_RJT_LOGIC;
419 rjt_data.explan = ELS_EXPL_NONE;
420 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
421 } else {
422 fmt = req->rnid_fmt;
423 len = sizeof(*rp);
424 if (fmt != ELS_RNIDF_GEN ||
425 ntohl(lport->rnid_gen.rnid_atype) == 0) {
426 fmt = ELS_RNIDF_NONE; /* nothing to provide */
427 len -= sizeof(rp->gen);
428 }
429 fp = fc_frame_alloc(lport, len);
430 if (fp) {
431 rp = fc_frame_payload_get(fp, len);
432 memset(rp, 0, len);
433 rp->rnid.rnid_cmd = ELS_LS_ACC;
434 rp->rnid.rnid_fmt = fmt;
435 rp->rnid.rnid_cid_len = sizeof(rp->cid);
436 rp->cid.rnid_wwpn = htonll(lport->wwpn);
437 rp->cid.rnid_wwnn = htonll(lport->wwnn);
438 if (fmt == ELS_RNIDF_GEN) {
439 rp->rnid.rnid_sid_len = sizeof(rp->gen);
440 memcpy(&rp->gen, &lport->rnid_gen,
441 sizeof(rp->gen));
442 }
443 sp = lport->tt.seq_start_next(sp);
444 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
445 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
446 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
447 FC_TYPE_ELS, f_ctl, 0);
448 lport->tt.seq_send(lport, sp, fp);
449 }
450 }
451 fc_frame_free(in_fp);
452}
453
454/**
34f42a07 455 * fc_lport_recv_adisc_req() - Handle received Address Discovery Request
42e9a92f
RL
456 * @lport: Fibre Channel local port recieving the ADISC
457 * @sp: current sequence in the ADISC exchange
458 * @fp: ADISC request frame
459 *
460 * Locking Note: The lport lock is expected to be held before calling
461 * this function.
462 */
463static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
464 struct fc_lport *lport)
465{
466 struct fc_frame *fp;
467 struct fc_exch *ep = fc_seq_exch(sp);
468 struct fc_els_adisc *req, *rp;
469 struct fc_seq_els_data rjt_data;
470 size_t len;
471 u32 f_ctl;
472
7414705e
RL
473 FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
474 fc_lport_state(lport));
42e9a92f
RL
475
476 req = fc_frame_payload_get(in_fp, sizeof(*req));
477 if (!req) {
478 rjt_data.fp = NULL;
479 rjt_data.reason = ELS_RJT_LOGIC;
480 rjt_data.explan = ELS_EXPL_NONE;
481 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
482 } else {
483 len = sizeof(*rp);
484 fp = fc_frame_alloc(lport, len);
485 if (fp) {
486 rp = fc_frame_payload_get(fp, len);
487 memset(rp, 0, len);
488 rp->adisc_cmd = ELS_LS_ACC;
489 rp->adisc_wwpn = htonll(lport->wwpn);
490 rp->adisc_wwnn = htonll(lport->wwnn);
491 hton24(rp->adisc_port_id,
492 fc_host_port_id(lport->host));
493 sp = lport->tt.seq_start_next(sp);
494 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
495 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
496 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
497 FC_TYPE_ELS, f_ctl, 0);
498 lport->tt.seq_send(lport, sp, fp);
499 }
500 }
501 fc_frame_free(in_fp);
502}
503
504/**
34f42a07 505 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
42e9a92f
RL
506 * @lport: Fibre Channel local port recieving the LOGO
507 * @sp: current sequence in the LOGO exchange
508 * @fp: LOGO request frame
509 *
510 * Locking Note: The lport lock is exected to be held before calling
511 * this function.
512 */
513static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
514 struct fc_lport *lport)
515{
516 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
517 fc_lport_enter_reset(lport);
518 fc_frame_free(fp);
519}
520
521/**
34f42a07 522 * fc_fabric_login() - Start the lport state machine
42e9a92f
RL
523 * @lport: The lport that should log into the fabric
524 *
525 * Locking Note: This function should not be called
526 * with the lport lock held.
527 */
528int fc_fabric_login(struct fc_lport *lport)
529{
530 int rc = -1;
531
532 mutex_lock(&lport->lp_mutex);
b1d9fd55 533 if (lport->state == LPORT_ST_DISABLED) {
42e9a92f
RL
534 fc_lport_enter_reset(lport);
535 rc = 0;
536 }
537 mutex_unlock(&lport->lp_mutex);
538
539 return rc;
540}
541EXPORT_SYMBOL(fc_fabric_login);
542
543/**
34f42a07 544 * fc_linkup() - Handler for transport linkup events
42e9a92f
RL
545 * @lport: The lport whose link is up
546 */
547void fc_linkup(struct fc_lport *lport)
548{
7414705e
RL
549 printk(KERN_INFO "libfc: Link up on port (%6x)\n",
550 fc_host_port_id(lport->host));
42e9a92f
RL
551
552 mutex_lock(&lport->lp_mutex);
bc0e17f6
VD
553 if (!lport->link_up) {
554 lport->link_up = 1;
42e9a92f
RL
555
556 if (lport->state == LPORT_ST_RESET)
557 fc_lport_enter_flogi(lport);
558 }
559 mutex_unlock(&lport->lp_mutex);
560}
561EXPORT_SYMBOL(fc_linkup);
562
563/**
34f42a07 564 * fc_linkdown() - Handler for transport linkdown events
42e9a92f
RL
565 * @lport: The lport whose link is down
566 */
567void fc_linkdown(struct fc_lport *lport)
568{
569 mutex_lock(&lport->lp_mutex);
7414705e
RL
570 printk(KERN_INFO "libfc: Link down on port (%6x)\n",
571 fc_host_port_id(lport->host));
42e9a92f 572
bc0e17f6
VD
573 if (lport->link_up) {
574 lport->link_up = 0;
42e9a92f
RL
575 fc_lport_enter_reset(lport);
576 lport->tt.fcp_cleanup(lport);
577 }
578 mutex_unlock(&lport->lp_mutex);
579}
580EXPORT_SYMBOL(fc_linkdown);
581
42e9a92f 582/**
34f42a07 583 * fc_fabric_logoff() - Logout of the fabric
42e9a92f
RL
584 * @lport: fc_lport pointer to logoff the fabric
585 *
586 * Return value:
587 * 0 for success, -1 for failure
34f42a07 588 */
42e9a92f
RL
589int fc_fabric_logoff(struct fc_lport *lport)
590{
591 lport->tt.disc_stop_final(lport);
592 mutex_lock(&lport->lp_mutex);
a0fd2e49
AJ
593 if (lport->dns_rp)
594 lport->tt.rport_logoff(lport->dns_rp);
595 mutex_unlock(&lport->lp_mutex);
596 lport->tt.rport_flush_queue();
597 mutex_lock(&lport->lp_mutex);
42e9a92f
RL
598 fc_lport_enter_logo(lport);
599 mutex_unlock(&lport->lp_mutex);
f7db2c15 600 cancel_delayed_work_sync(&lport->retry_work);
42e9a92f
RL
601 return 0;
602}
603EXPORT_SYMBOL(fc_fabric_logoff);
604
605/**
34f42a07 606 * fc_lport_destroy() - unregister a fc_lport
42e9a92f
RL
607 * @lport: fc_lport pointer to unregister
608 *
609 * Return value:
610 * None
611 * Note:
612 * exit routine for fc_lport instance
613 * clean-up all the allocated memory
614 * and free up other system resources.
615 *
34f42a07 616 */
42e9a92f
RL
617int fc_lport_destroy(struct fc_lport *lport)
618{
bbf15669 619 mutex_lock(&lport->lp_mutex);
b1d9fd55 620 lport->state = LPORT_ST_DISABLED;
bbf15669 621 lport->link_up = 0;
42e9a92f 622 lport->tt.frame_send = fc_frame_drop;
bbf15669
AJ
623 mutex_unlock(&lport->lp_mutex);
624
42e9a92f 625 lport->tt.fcp_abort_io(lport);
e9ba8b42 626 lport->tt.disc_stop_final(lport);
1f6ff364 627 lport->tt.exch_mgr_reset(lport, 0, 0);
42e9a92f
RL
628 return 0;
629}
630EXPORT_SYMBOL(fc_lport_destroy);
631
632/**
34f42a07 633 * fc_set_mfs() - sets up the mfs for the corresponding fc_lport
42e9a92f
RL
634 * @lport: fc_lport pointer to unregister
635 * @mfs: the new mfs for fc_lport
636 *
637 * Set mfs for the given fc_lport to the new mfs.
638 *
639 * Return: 0 for success
34f42a07 640 */
42e9a92f
RL
641int fc_set_mfs(struct fc_lport *lport, u32 mfs)
642{
643 unsigned int old_mfs;
644 int rc = -EINVAL;
645
646 mutex_lock(&lport->lp_mutex);
647
648 old_mfs = lport->mfs;
649
650 if (mfs >= FC_MIN_MAX_FRAME) {
651 mfs &= ~3;
652 if (mfs > FC_MAX_FRAME)
653 mfs = FC_MAX_FRAME;
654 mfs -= sizeof(struct fc_frame_header);
655 lport->mfs = mfs;
656 rc = 0;
657 }
658
659 if (!rc && mfs < old_mfs)
660 fc_lport_enter_reset(lport);
661
662 mutex_unlock(&lport->lp_mutex);
663
664 return rc;
665}
666EXPORT_SYMBOL(fc_set_mfs);
667
668/**
34f42a07 669 * fc_lport_disc_callback() - Callback for discovery events
42e9a92f
RL
670 * @lport: FC local port
671 * @event: The discovery event
672 */
673void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
674{
675 switch (event) {
676 case DISC_EV_SUCCESS:
7414705e 677 FC_LPORT_DBG(lport, "Discovery succeeded\n");
42e9a92f
RL
678 break;
679 case DISC_EV_FAILED:
7414705e
RL
680 printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
681 fc_host_port_id(lport->host));
42e9a92f
RL
682 mutex_lock(&lport->lp_mutex);
683 fc_lport_enter_reset(lport);
684 mutex_unlock(&lport->lp_mutex);
685 break;
686 case DISC_EV_NONE:
687 WARN_ON(1);
688 break;
689 }
690}
691
692/**
34f42a07 693 * fc_rport_enter_ready() - Enter the ready state and start discovery
42e9a92f
RL
694 * @lport: Fibre Channel local port that is ready
695 *
696 * Locking Note: The lport lock is expected to be held before calling
697 * this routine.
698 */
699static void fc_lport_enter_ready(struct fc_lport *lport)
700{
7414705e
RL
701 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
702 fc_lport_state(lport));
42e9a92f
RL
703
704 fc_lport_state_enter(lport, LPORT_ST_READY);
705
29d898e9
JE
706 if (!lport->ptp_rp)
707 lport->tt.disc_start(fc_lport_disc_callback, lport);
42e9a92f
RL
708}
709
710/**
34f42a07 711 * fc_lport_recv_flogi_req() - Receive a FLOGI request
42e9a92f
RL
712 * @sp_in: The sequence the FLOGI is on
713 * @rx_fp: The frame the FLOGI is in
714 * @lport: The lport that recieved the request
715 *
716 * A received FLOGI request indicates a point-to-point connection.
717 * Accept it with the common service parameters indicating our N port.
718 * Set up to do a PLOGI if we have the higher-number WWPN.
719 *
720 * Locking Note: The lport lock is exected to be held before calling
721 * this function.
722 */
723static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
724 struct fc_frame *rx_fp,
725 struct fc_lport *lport)
726{
727 struct fc_frame *fp;
728 struct fc_frame_header *fh;
729 struct fc_seq *sp;
730 struct fc_exch *ep;
731 struct fc_els_flogi *flp;
732 struct fc_els_flogi *new_flp;
733 u64 remote_wwpn;
734 u32 remote_fid;
735 u32 local_fid;
736 u32 f_ctl;
737
7414705e
RL
738 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
739 fc_lport_state(lport));
42e9a92f
RL
740
741 fh = fc_frame_header_get(rx_fp);
742 remote_fid = ntoh24(fh->fh_s_id);
743 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
744 if (!flp)
745 goto out;
746 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
747 if (remote_wwpn == lport->wwpn) {
7414705e
RL
748 printk(KERN_WARNING "libfc: Received FLOGI from port "
749 "with same WWPN %llx\n", remote_wwpn);
42e9a92f
RL
750 goto out;
751 }
7414705e 752 FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
42e9a92f
RL
753
754 /*
755 * XXX what is the right thing to do for FIDs?
756 * The originator might expect our S_ID to be 0xfffffe.
757 * But if so, both of us could end up with the same FID.
758 */
759 local_fid = FC_LOCAL_PTP_FID_LO;
760 if (remote_wwpn < lport->wwpn) {
761 local_fid = FC_LOCAL_PTP_FID_HI;
762 if (!remote_fid || remote_fid == local_fid)
763 remote_fid = FC_LOCAL_PTP_FID_LO;
764 } else if (!remote_fid) {
765 remote_fid = FC_LOCAL_PTP_FID_HI;
766 }
767
768 fc_host_port_id(lport->host) = local_fid;
769
770 fp = fc_frame_alloc(lport, sizeof(*flp));
771 if (fp) {
772 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
773 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
774 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
775 new_flp->fl_cmd = (u8) ELS_LS_ACC;
776
777 /*
778 * Send the response. If this fails, the originator should
779 * repeat the sequence.
780 */
781 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
782 ep = fc_seq_exch(sp);
783 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
784 FC_TYPE_ELS, f_ctl, 0);
785 lport->tt.seq_send(lport, sp, fp);
786
787 } else {
788 fc_lport_error(lport, fp);
789 }
790 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
791 get_unaligned_be64(&flp->fl_wwnn));
792
42e9a92f
RL
793out:
794 sp = fr_seq(rx_fp);
795 fc_frame_free(rx_fp);
796}
797
798/**
34f42a07 799 * fc_lport_recv_req() - The generic lport request handler
42e9a92f
RL
800 * @lport: The lport that received the request
801 * @sp: The sequence the request is on
802 * @fp: The frame the request is in
803 *
804 * This function will see if the lport handles the request or
805 * if an rport should handle the request.
806 *
807 * Locking Note: This function should not be called with the lport
808 * lock held becuase it will grab the lock.
809 */
810static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
811 struct fc_frame *fp)
812{
813 struct fc_frame_header *fh = fc_frame_header_get(fp);
814 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
42e9a92f
RL
815
816 mutex_lock(&lport->lp_mutex);
817
818 /*
819 * Handle special ELS cases like FLOGI, LOGO, and
820 * RSCN here. These don't require a session.
821 * Even if we had a session, it might not be ready.
822 */
e9ba8b42
JE
823 if (!lport->link_up)
824 fc_frame_free(fp);
825 else if (fh->fh_type == FC_TYPE_ELS &&
826 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
42e9a92f
RL
827 /*
828 * Check opcode.
829 */
131203a1 830 recv = lport->tt.rport_recv_req;
42e9a92f
RL
831 switch (fc_frame_payload_op(fp)) {
832 case ELS_FLOGI:
833 recv = fc_lport_recv_flogi_req;
834 break;
835 case ELS_LOGO:
836 fh = fc_frame_header_get(fp);
837 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
838 recv = fc_lport_recv_logo_req;
839 break;
840 case ELS_RSCN:
841 recv = lport->tt.disc_recv_req;
842 break;
843 case ELS_ECHO:
844 recv = fc_lport_recv_echo_req;
845 break;
846 case ELS_RLIR:
847 recv = fc_lport_recv_rlir_req;
848 break;
849 case ELS_RNID:
850 recv = fc_lport_recv_rnid_req;
851 break;
852 case ELS_ADISC:
853 recv = fc_lport_recv_adisc_req;
854 break;
855 }
856
131203a1 857 recv(sp, fp, lport);
42e9a92f 858 } else {
7414705e
RL
859 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
860 fr_eof(fp));
42e9a92f
RL
861 fc_frame_free(fp);
862 }
863 mutex_unlock(&lport->lp_mutex);
864
865 /*
866 * The common exch_done for all request may not be good
867 * if any request requires longer hold on exhange. XXX
868 */
869 lport->tt.exch_done(sp);
870}
871
872/**
34f42a07 873 * fc_lport_reset() - Reset an lport
42e9a92f
RL
874 * @lport: The lport which should be reset
875 *
876 * Locking Note: This functions should not be called with the
877 * lport lock held.
878 */
879int fc_lport_reset(struct fc_lport *lport)
880{
f7db2c15 881 cancel_delayed_work_sync(&lport->retry_work);
42e9a92f
RL
882 mutex_lock(&lport->lp_mutex);
883 fc_lport_enter_reset(lport);
884 mutex_unlock(&lport->lp_mutex);
885 return 0;
886}
887EXPORT_SYMBOL(fc_lport_reset);
888
889/**
1190d925 890 * fc_lport_reset_locked() - Reset the local port
42e9a92f
RL
891 * @lport: Fibre Channel local port to be reset
892 *
893 * Locking Note: The lport lock is expected to be held before calling
894 * this routine.
895 */
1190d925 896static void fc_lport_reset_locked(struct fc_lport *lport)
42e9a92f 897{
42e9a92f
RL
898 if (lport->dns_rp)
899 lport->tt.rport_logoff(lport->dns_rp);
900
48f00902 901 lport->ptp_rp = NULL;
42e9a92f
RL
902
903 lport->tt.disc_stop(lport);
904
1f6ff364 905 lport->tt.exch_mgr_reset(lport, 0, 0);
42e9a92f
RL
906 fc_host_fabric_name(lport->host) = 0;
907 fc_host_port_id(lport->host) = 0;
1190d925 908}
42e9a92f 909
1190d925
JE
910/**
911 * fc_lport_enter_reset() - Reset the local port
912 * @lport: Fibre Channel local port to be reset
913 *
914 * Locking Note: The lport lock is expected to be held before calling
915 * this routine.
916 */
917static void fc_lport_enter_reset(struct fc_lport *lport)
918{
919 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
920 fc_lport_state(lport));
921
922 fc_lport_state_enter(lport, LPORT_ST_RESET);
923 fc_lport_reset_locked(lport);
bc0e17f6 924 if (lport->link_up)
42e9a92f
RL
925 fc_lport_enter_flogi(lport);
926}
927
1190d925
JE
928/**
929 * fc_lport_enter_disabled() - disable the local port
930 * @lport: Fibre Channel local port to be reset
931 *
932 * Locking Note: The lport lock is expected to be held before calling
933 * this routine.
934 */
935static void fc_lport_enter_disabled(struct fc_lport *lport)
936{
937 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
938 fc_lport_state(lport));
939
940 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
941 fc_lport_reset_locked(lport);
942}
943
/**
 * fc_lport_error() - Handler for any errors
 * @lport: The fc_lport object
 * @fp: The frame pointer — NULL on allocation failure, or an ERR_PTR()
 *	encoded exchange error
 *
 * If the error was caused by a resource allocation failure
 * then wait for half a second and retry, otherwise retry
 * after the e_d_tov time. Once max_retry_count is exceeded the
 * whole lport is reset. Every caller in this file already holds
 * the lport lock.
 */
static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
	unsigned long delay = 0;

	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
		     PTR_ERR(fp), fc_lport_state(lport),
		     lport->retry_count);

	/* Only alloc failures (NULL) and timeouts are retried; other
	 * error codes fall through and are dropped here. */
	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
		/*
		 * Memory allocation failure, or the exchange timed out.
		 * Retry after delay
		 */
		if (lport->retry_count < lport->max_retry_count) {
			lport->retry_count++;
			/* 500ms after an alloc failure, e_d_tov after a
			 * timed-out exchange */
			if (!fp)
				delay = msecs_to_jiffies(500);
			else
				delay = msecs_to_jiffies(lport->e_d_tov);

			schedule_delayed_work(&lport->retry_work, delay);
		} else {
			/* Retries exhausted: reset the port regardless of
			 * which login step failed. */
			switch (lport->state) {
			case LPORT_ST_DISABLED:
			case LPORT_ST_READY:
			case LPORT_ST_RESET:
			case LPORT_ST_RPN_ID:
			case LPORT_ST_RFT_ID:
			case LPORT_ST_SCR:
			case LPORT_ST_DNS:
			case LPORT_ST_FLOGI:
			case LPORT_ST_LOGO:
				fc_lport_enter_reset(lport);
				break;
			}
		}
	}
}
990
/**
 * fc_lport_rft_id_resp() - Handle response to Register Fibre
 *			    Channel Types by ID (RFT_ID) request
 * @sp: current sequence in RFT_ID exchange
 * @fp: response frame, or an ERR_PTR() encoded error
 * @lp_arg: Fibre Channel host port instance
 *
 * A directory-server CT accept advances the lport to State Change
 * Registration (SCR); any other reply goes to fc_lport_error() for
 * retry handling.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
				 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	/* Exchange was closed (lport reset in progress): nothing to do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	FC_LPORT_DBG(lport, "Received a RFT_ID response\n");

	if (lport->state != LPORT_ST_RFT_ID) {
		FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointers carry no frame to free */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	/* Only a well-formed directory-server accept advances the state */
	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		fc_lport_enter_scr(lport);
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1044
/**
 * fc_lport_rpn_id_resp() - Handle response to Register Port
 *			    Name by ID (RPN_ID) request
 * @sp: current sequence in RPN_ID exchange
 * @fp: response frame, or an ERR_PTR() encoded error
 * @lp_arg: Fibre Channel host port instance
 *
 * A directory-server CT accept advances the lport to FC4 type
 * registration (RFT_ID); any other reply goes to fc_lport_error()
 * for retry handling.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
				 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	/* Exchange was closed (lport reset in progress): nothing to do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	FC_LPORT_DBG(lport, "Received a RPN_ID response\n");

	if (lport->state != LPORT_ST_RPN_ID) {
		FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointers carry no frame to free */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));
	/* Only a well-formed directory-server accept advances the state */
	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		fc_lport_enter_rft_id(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1098
/**
 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
 * @sp: current sequence in SCR exchange
 * @fp: response frame, or an ERR_PTR() encoded error
 * @lp_arg: Fibre Channel lport port instance that sent the registration request
 *
 * An LS_ACC completes the login sequence and the lport becomes READY;
 * any other reply goes to fc_lport_error() for retry handling.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	/* Exchange was closed (lport reset in progress): nothing to do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	FC_LPORT_DBG(lport, "Received a SCR response\n");

	if (lport->state != LPORT_ST_SCR) {
		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointers carry no frame to free */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_ready(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1146
1147/**
34f42a07 1148 * fc_lport_enter_scr() - Send a State Change Register (SCR) request
42e9a92f
RL
1149 * @lport: Fibre Channel local port to register for state changes
1150 *
1151 * Locking Note: The lport lock is expected to be held before calling
1152 * this routine.
1153 */
1154static void fc_lport_enter_scr(struct fc_lport *lport)
1155{
1156 struct fc_frame *fp;
1157
7414705e
RL
1158 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1159 fc_lport_state(lport));
42e9a92f
RL
1160
1161 fc_lport_state_enter(lport, LPORT_ST_SCR);
1162
1163 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1164 if (!fp) {
1165 fc_lport_error(lport, fp);
1166 return;
1167 }
1168
a46f327a 1169 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
42e9a92f
RL
1170 fc_lport_scr_resp, lport, lport->e_d_tov))
1171 fc_lport_error(lport, fp);
1172}
1173
1174/**
34f42a07 1175 * fc_lport_enter_rft_id() - Register FC4-types with the name server
42e9a92f
RL
1176 * @lport: Fibre Channel local port to register
1177 *
1178 * Locking Note: The lport lock is expected to be held before calling
1179 * this routine.
1180 */
1181static void fc_lport_enter_rft_id(struct fc_lport *lport)
1182{
1183 struct fc_frame *fp;
1184 struct fc_ns_fts *lps;
1185 int i;
1186
7414705e
RL
1187 FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
1188 fc_lport_state(lport));
42e9a92f
RL
1189
1190 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1191
1192 lps = &lport->fcts;
1193 i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
1194 while (--i >= 0)
1195 if (ntohl(lps->ff_type_map[i]) != 0)
1196 break;
1197 if (i < 0) {
1198 /* nothing to register, move on to SCR */
1199 fc_lport_enter_scr(lport);
1200 return;
1201 }
1202
1203 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1204 sizeof(struct fc_ns_rft));
1205 if (!fp) {
1206 fc_lport_error(lport, fp);
1207 return;
1208 }
1209
a46f327a 1210 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
42e9a92f
RL
1211 fc_lport_rft_id_resp,
1212 lport, lport->e_d_tov))
1213 fc_lport_error(lport, fp);
1214}
1215
1216/**
34f42a07 1217 * fc_rport_enter_rft_id() - Register port name with the name server
42e9a92f
RL
1218 * @lport: Fibre Channel local port to register
1219 *
1220 * Locking Note: The lport lock is expected to be held before calling
1221 * this routine.
1222 */
1223static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1224{
1225 struct fc_frame *fp;
1226
7414705e
RL
1227 FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
1228 fc_lport_state(lport));
42e9a92f
RL
1229
1230 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1231
1232 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1233 sizeof(struct fc_ns_rn_id));
1234 if (!fp) {
1235 fc_lport_error(lport, fp);
1236 return;
1237 }
1238
a46f327a 1239 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
42e9a92f
RL
1240 fc_lport_rpn_id_resp,
1241 lport, lport->e_d_tov))
1242 fc_lport_error(lport, fp);
1243}
1244
/* Callbacks for rports created by the lport itself (e.g. the dNS rport) */
static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};
1248
/**
 * fc_lport_enter_dns() - Create a rport to the name server
 * @lport: Fibre Channel local port requesting a rport for the name server
 *
 * Creates an rport for the directory server well-known address and
 * starts its login; completion is signalled back through
 * fc_lport_rport_ops.event_callback.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	/* rport_create is called under the disc_mutex, which is legal to
	 * take while holding the lport lock (lport > disc in the lock
	 * hierarchy described at the top of this file). */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	/* NULL frame signals an allocation-style failure: retry later */
	fc_lport_error(lport, NULL);
}
1278
/**
 * fc_lport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_lport
 *
 * Scheduled by fc_lport_error(); re-enters the current login state to
 * retry the request that failed. A timer firing while in DISABLED,
 * READY or RESET is unexpected and triggers a WARN_ON.
 */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	switch (lport->state) {
	case LPORT_ST_DISABLED:
	case LPORT_ST_READY:
	case LPORT_ST_RESET:
		/* no retries are scheduled from these states */
		WARN_ON(1);
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RPN_ID:
		fc_lport_enter_rpn_id(lport);
		break;
	case LPORT_ST_RFT_ID:
		fc_lport_enter_rft_id(lport);
		break;
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}
1319
/**
 * fc_lport_logo_resp() - Handle response to LOGO request
 * @sp: current sequence in LOGO exchange
 * @fp: response frame, or an ERR_PTR() encoded error
 * @lp_arg: Fibre Channel lport port instance that sent the LOGO request
 *
 * An LS_ACC completes the logout and the lport is moved to the
 * DISABLED state; any other reply goes to fc_lport_error() for retry
 * handling.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	/* Exchange was closed (lport reset in progress): nothing to do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	FC_LPORT_DBG(lport, "Received a LOGO response\n");

	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointers carry no frame to free */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1367
1368/**
34f42a07 1369 * fc_rport_enter_logo() - Logout of the fabric
42e9a92f
RL
1370 * @lport: Fibre Channel local port to be logged out
1371 *
1372 * Locking Note: The lport lock is expected to be held before calling
1373 * this routine.
1374 */
1375static void fc_lport_enter_logo(struct fc_lport *lport)
1376{
1377 struct fc_frame *fp;
1378 struct fc_els_logo *logo;
1379
7414705e
RL
1380 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1381 fc_lport_state(lport));
42e9a92f
RL
1382
1383 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1384
42e9a92f
RL
1385 fp = fc_frame_alloc(lport, sizeof(*logo));
1386 if (!fp) {
1387 fc_lport_error(lport, fp);
1388 return;
1389 }
1390
a46f327a
JE
1391 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1392 fc_lport_logo_resp, lport, lport->e_d_tov))
42e9a92f
RL
1393 fc_lport_error(lport, fp);
1394}
1395
/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp: current sequence in FLOGI exchange
 * @fp: response frame, or an ERR_PTR() encoded error
 * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request
 *
 * On an accept, adopts the FID assigned in the response's D_ID and the
 * fabric's service parameters, then either sets up point-to-point mode
 * (no F_Port present) or proceeds to name-server login.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	/* Exchange was closed (lport reset in progress): nothing to do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	FC_LPORT_DBG(lport, "Received a FLOGI response\n");

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointers carry no frame to free */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	/* The D_ID of the response is the FID the fabric assigned us */
	fh = fc_frame_header_get(fp);
	did = ntoh24(fh->fh_d_id);
	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {

		printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
		       did);
		fc_host_port_id(lport->host) = did;

		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			/* Shrink our max frame size to the fabric's BB
			 * limit if it is smaller (but stays valid) */
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			/* EDTR set: e_d_tov is in a finer unit, scaled
			 * down by 1e6 here — presumably ns to ms; confirm
			 * against the FC-FS common service parameters */
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;
			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				/* No F_Port: the peer is another N_Port,
				 * so run in point-to-point mode */
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				printk(KERN_INFO "libfc: Port (%6x) entered "
				       "point to point mode\n", did);
				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				/* Fabric login: adopt fabric timeouts and
				 * continue with name-server login */
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1486
1487/**
34f42a07 1488 * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
42e9a92f
RL
1489 * @lport: Fibre Channel local port to be logged in to the fabric
1490 *
1491 * Locking Note: The lport lock is expected to be held before calling
1492 * this routine.
1493 */
1494void fc_lport_enter_flogi(struct fc_lport *lport)
1495{
1496 struct fc_frame *fp;
1497
7414705e
RL
1498 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1499 fc_lport_state(lport));
42e9a92f
RL
1500
1501 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1502
1503 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1504 if (!fp)
1505 return fc_lport_error(lport, fp);
1506
a46f327a 1507 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
42e9a92f
RL
1508 fc_lport_flogi_resp, lport, lport->e_d_tov))
1509 fc_lport_error(lport, fp);
1510}
1511
/**
 * fc_lport_config() - Configure a fc_lport
 * @lport: The local port to configure
 *
 * Initializes the retry timer and lport mutex, starts the port in the
 * DISABLED state and advertises the FCP and CT FC4 types.
 * Always returns 0.
 */
int fc_lport_config(struct fc_lport *lport)
{
	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
	mutex_init(&lport->lp_mutex);

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);

	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
	fc_lport_add_fc4_type(lport, FC_TYPE_CT);

	return 0;
}
EXPORT_SYMBOL(fc_lport_config);
1526
/**
 * fc_lport_init() - Initialize lport-layer defaults for a local port
 * @lport: The local port to initialize
 *
 * Installs default transport-template handlers where the LLD supplied
 * none, and publishes static attributes (port type, names, class of
 * service, FC4 types, max frame size, supported speeds) to the SCSI
 * FC transport. Always returns 0.
 */
int fc_lport_init(struct fc_lport *lport)
{
	if (!lport->tt.lport_recv)
		lport->tt.lport_recv = fc_lport_recv_req;

	if (!lport->tt.lport_reset)
		lport->tt.lport_reset = fc_lport_reset;

	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
	fc_host_node_name(lport->host) = lport->wwnn;
	fc_host_port_name(lport->host) = lport->wwpn;
	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
	memset(fc_host_supported_fc4s(lport->host), 0,
	       sizeof(fc_host_supported_fc4s(lport->host)));
	/* entries [2] and [7] appear to flag the FCP and CT types added
	 * in fc_lport_config() — confirm against the transport's fc4s
	 * bitmap layout */
	fc_host_supported_fc4s(lport->host)[2] = 1;
	fc_host_supported_fc4s(lport->host)[7] = 1;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(lport->host), 0,
	       sizeof(fc_host_active_fc4s(lport->host)));
	fc_host_active_fc4s(lport->host)[2] = 1;
	fc_host_active_fc4s(lport->host)[7] = 1;
	fc_host_maxframe_size(lport->host) = lport->mfs;
	fc_host_supported_speeds(lport->host) = 0;
	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;

	INIT_LIST_HEAD(&lport->ema_list);
	return 0;
}
EXPORT_SYMBOL(fc_lport_init);