[SCSI] libfc: rename rport state "NONE" to "DELETE".
drivers/scsi/libfc/fc_rport.c
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * RPORT GENERAL INFO
22 *
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class with regard to adding and deleting rports.
27 *
28 * fc_rports represent N_Ports within the fabric.
29 */
30
31/*
32 * RPORT LOCKING
33 *
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
38 *
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time; since they're not critical for the I/O
44 * path, this potential over-use of the mutex is acceptable.
45 */
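/*
 * Illustrative sketch of the entry-point pattern described above; this is the
 * same sequence fc_rport_login() below uses and is shown here only as an
 * example (rdata is the rport's fc_rport_libfc_priv, i.e. rport->dd_data, and
 * the caller is assumed to already hold a reference on the rport):
 *
 *	mutex_lock(&rdata->rp_mutex);
 *	fc_rport_enter_plogi(rport);	- _enter_* routines expect rp_mutex held
 *	mutex_unlock(&rdata->rp_mutex);
 */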
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/interrupt.h>
50#include <linux/rcupdate.h>
51#include <linux/timer.h>
52#include <linux/workqueue.h>
53#include <asm/unaligned.h>
54
55#include <scsi/libfc.h>
56#include <scsi/fc_encode.h>
57
58struct workqueue_struct *rport_event_queue;
59
60static void fc_rport_enter_plogi(struct fc_rport *);
61static void fc_rport_enter_prli(struct fc_rport *);
62static void fc_rport_enter_rtv(struct fc_rport *);
63static void fc_rport_enter_ready(struct fc_rport *);
64static void fc_rport_enter_logo(struct fc_rport *);
65
66static void fc_rport_recv_plogi_req(struct fc_rport *,
67 struct fc_seq *, struct fc_frame *);
68static void fc_rport_recv_prli_req(struct fc_rport *,
69 struct fc_seq *, struct fc_frame *);
70static void fc_rport_recv_prlo_req(struct fc_rport *,
71 struct fc_seq *, struct fc_frame *);
72static void fc_rport_recv_logo_req(struct fc_rport *,
73 struct fc_seq *, struct fc_frame *);
74static void fc_rport_timeout(struct work_struct *);
75static void fc_rport_error(struct fc_rport *, struct fc_frame *);
76static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *);
77static void fc_rport_work(struct work_struct *);
78
79static const char *fc_rport_state_names[] = {
80 [RPORT_ST_INIT] = "Init",
81 [RPORT_ST_PLOGI] = "PLOGI",
82 [RPORT_ST_PRLI] = "PRLI",
83 [RPORT_ST_RTV] = "RTV",
84 [RPORT_ST_READY] = "Ready",
85 [RPORT_ST_LOGO] = "LOGO",
86 [RPORT_ST_DELETE] = "Delete",
87};
88
89static void fc_rport_rogue_destroy(struct device *dev)
90{
91 struct fc_rport *rport = dev_to_rport(dev);
92 FC_RPORT_DBG(rport, "Destroying rogue rport\n");
93 kfree(rport);
94}
95
96struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
97{
98 struct fc_rport *rport;
99 struct fc_rport_libfc_priv *rdata;
100 rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
101
102 if (!rport)
103 return NULL;
104
105 rdata = RPORT_TO_PRIV(rport);
106
107 rport->dd_data = rdata;
108 rport->port_id = dp->ids.port_id;
109 rport->port_name = dp->ids.port_name;
110 rport->node_name = dp->ids.node_name;
111 rport->roles = dp->ids.roles;
112 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
113 /*
114 * Note: all this libfc rogue rport code will be removed for
115 * upstream so it's fine that this is really ugly and hacky right now.
116 */
117 device_initialize(&rport->dev);
118 rport->dev.release = fc_rport_rogue_destroy;
119
120 mutex_init(&rdata->rp_mutex);
121 rdata->local_port = dp->lp;
122 rdata->trans_state = FC_PORTSTATE_ROGUE;
123 rdata->rp_state = RPORT_ST_INIT;
124 rdata->event = RPORT_EV_NONE;
125 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
126 rdata->ops = NULL;
127 rdata->e_d_tov = dp->lp->e_d_tov;
128 rdata->r_a_tov = dp->lp->r_a_tov;
129 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
130 INIT_WORK(&rdata->event_work, fc_rport_work);
131 /*
132 * For good measure, but not necessary as we should only
133 * add REAL rports to the lport list.
134 */
135 INIT_LIST_HEAD(&rdata->peers);
136
137 return rport;
138}
139
140/**
141 * fc_rport_state() - return a string for the state the rport is in
142 * @rport: The rport whose state we want to get a string for
143 */
144static const char *fc_rport_state(struct fc_rport *rport)
145{
146 const char *cp;
147 struct fc_rport_libfc_priv *rdata = rport->dd_data;
148
149 cp = fc_rport_state_names[rdata->rp_state];
150 if (!cp)
151 cp = "Unknown";
152 return cp;
153}
154
155/**
156 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
157 * @rport: Pointer to Fibre Channel remote port structure
158 * @timeout: timeout in seconds
159 */
160void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
161{
162 if (timeout)
163 rport->dev_loss_tmo = timeout + 5;
164 else
165 rport->dev_loss_tmo = 30;
166}
167EXPORT_SYMBOL(fc_set_rport_loss_tmo);
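/*
 * Example of the calculation above (illustrative values only): a caller
 * passing a 30 second timeout gets rport->dev_loss_tmo = 35 (timeout plus
 * 5 seconds of slack), while passing 0 falls back to the 30 second default.
 */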
168
169/**
170 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
171 * @flp: FLOGI payload structure
172 * @maxval: upper limit, may be less than what is in the service parameters
173 */
174static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
175 unsigned int maxval)
176{
177 unsigned int mfs;
178
179 /*
180 * Get max payload from the common service parameters and the
181 * class 3 receive data field size.
182 */
183 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
184 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
185 maxval = mfs;
186 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
187 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
188 maxval = mfs;
189 return maxval;
190}
191
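/*
 * Worked example for fc_plogi_get_maxframe() (values assumed for
 * illustration): with lport->mfs = 2112, a peer advertising 2112 bytes in the
 * common service parameters (sp_bb_data) and 2048 bytes in the class 3
 * receive data field size (cp_rdfs) yields min(2112, 2112, 2048) = 2048,
 * which the callers below store in rport->maxframe_size.
 */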
192/**
193 * fc_rport_state_enter() - Change the rport's state
194 * @rport: The rport whose state should change
195 * @new: The new state of the rport
196 *
197 * Locking Note: Called with the rport lock held
198 */
199static void fc_rport_state_enter(struct fc_rport *rport,
200 enum fc_rport_state new)
201{
202 struct fc_rport_libfc_priv *rdata = rport->dd_data;
203 if (rdata->rp_state != new)
204 rdata->retries = 0;
205 rdata->rp_state = new;
206}
207
208static void fc_rport_work(struct work_struct *work)
209{
210 u32 port_id;
211 struct fc_rport_libfc_priv *rdata =
212 container_of(work, struct fc_rport_libfc_priv, event_work);
213 enum fc_rport_event event;
214 enum fc_rport_trans_state trans_state;
215 struct fc_lport *lport = rdata->local_port;
216 struct fc_rport_operations *rport_ops;
217 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
218
219 mutex_lock(&rdata->rp_mutex);
220 event = rdata->event;
221 rport_ops = rdata->ops;
222
223 if (event == RPORT_EV_CREATED) {
224 struct fc_rport *new_rport;
225 struct fc_rport_libfc_priv *new_rdata;
226 struct fc_rport_identifiers ids;
227
228 ids.port_id = rport->port_id;
229 ids.roles = rport->roles;
230 ids.port_name = rport->port_name;
231 ids.node_name = rport->node_name;
232
233 mutex_unlock(&rdata->rp_mutex);
234
235 new_rport = fc_remote_port_add(lport->host, 0, &ids);
236 if (new_rport) {
237 /*
238 * Switch from the rogue rport to the rport
239 * returned by the FC class.
240 */
241 new_rport->maxframe_size = rport->maxframe_size;
242
243 new_rdata = new_rport->dd_data;
244 new_rdata->e_d_tov = rdata->e_d_tov;
245 new_rdata->r_a_tov = rdata->r_a_tov;
246 new_rdata->ops = rdata->ops;
247 new_rdata->local_port = rdata->local_port;
248 new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
249 new_rdata->trans_state = FC_PORTSTATE_REAL;
250 mutex_init(&new_rdata->rp_mutex);
251 INIT_DELAYED_WORK(&new_rdata->retry_work,
252 fc_rport_timeout);
253 INIT_LIST_HEAD(&new_rdata->peers);
254 INIT_WORK(&new_rdata->event_work, fc_rport_work);
255
256 fc_rport_state_enter(new_rport, RPORT_ST_READY);
257 } else {
258 printk(KERN_WARNING "libfc: Failed to allocate "
259 "memory for rport (%6x)\n", ids.port_id);
260 event = RPORT_EV_FAILED;
261 }
262 if (rport->port_id != FC_FID_DIR_SERV)
263 if (rport_ops->event_callback)
264 rport_ops->event_callback(lport, rport,
265 RPORT_EV_FAILED);
266 put_device(&rport->dev);
267 rport = new_rport;
268 rdata = new_rport->dd_data;
269 if (rport_ops->event_callback)
270 rport_ops->event_callback(lport, rport, event);
271 } else if ((event == RPORT_EV_FAILED) ||
272 (event == RPORT_EV_LOGO) ||
273 (event == RPORT_EV_STOP)) {
274 trans_state = rdata->trans_state;
275 mutex_unlock(&rdata->rp_mutex);
276 if (rport_ops->event_callback)
277 rport_ops->event_callback(lport, rport, event);
278 if (trans_state == FC_PORTSTATE_ROGUE)
279 put_device(&rport->dev);
280 else {
281 port_id = rport->port_id;
282 fc_remote_port_delete(rport);
283 lport->tt.exch_mgr_reset(lport, 0, port_id);
284 lport->tt.exch_mgr_reset(lport, port_id, 0);
285 }
286 } else
287 mutex_unlock(&rdata->rp_mutex);
288}
289
290/**
291 * fc_rport_login() - Start the remote port login state machine
292 * @rport: Fibre Channel remote port
293 *
294 * Locking Note: Called without the rport lock held. This
295 * function will hold the rport lock, call an _enter_*
296 * function and then unlock the rport.
297 */
298int fc_rport_login(struct fc_rport *rport)
299{
300 struct fc_rport_libfc_priv *rdata = rport->dd_data;
301
302 mutex_lock(&rdata->rp_mutex);
303
304 FC_RPORT_DBG(rport, "Login to port\n");
305
306 fc_rport_enter_plogi(rport);
307
308 mutex_unlock(&rdata->rp_mutex);
309
310 return 0;
311}
312
313/**
314 * fc_rport_logoff() - Logoff and remove an rport
315 * @rport: Fibre Channel remote port to be removed
316 *
317 * Locking Note: Called without the rport lock held. This
318 * function will hold the rport lock, call an _enter_*
319 * function and then unlock the rport.
320 */
321int fc_rport_logoff(struct fc_rport *rport)
322{
323 struct fc_rport_libfc_priv *rdata = rport->dd_data;
324
325 mutex_lock(&rdata->rp_mutex);
326
327 FC_RPORT_DBG(rport, "Remove port\n");
328
329 if (rdata->rp_state == RPORT_ST_DELETE) {
330 FC_RPORT_DBG(rport, "Port in Delete state, not removing\n");
331 mutex_unlock(&rdata->rp_mutex);
332 goto out;
333 }
334
335 fc_rport_enter_logo(rport);
336
337 /*
338 * Change the state to Delete so that we discard
339 * the response.
340 */
341 fc_rport_state_enter(rport, RPORT_ST_DELETE);
342
343 mutex_unlock(&rdata->rp_mutex);
344
345 cancel_delayed_work_sync(&rdata->retry_work);
346
347 mutex_lock(&rdata->rp_mutex);
348
349 rdata->event = RPORT_EV_STOP;
350 queue_work(rport_event_queue, &rdata->event_work);
351
352 mutex_unlock(&rdata->rp_mutex);
353
354out:
355 return 0;
356}
357
358/**
359 * fc_rport_enter_ready() - The rport is ready
360 * @rport: Fibre Channel remote port that is ready
361 *
362 * Locking Note: The rport lock is expected to be held before calling
363 * this routine.
364 */
365static void fc_rport_enter_ready(struct fc_rport *rport)
366{
367 struct fc_rport_libfc_priv *rdata = rport->dd_data;
368
369 fc_rport_state_enter(rport, RPORT_ST_READY);
370
371 FC_RPORT_DBG(rport, "Port is Ready\n");
372
373 rdata->event = RPORT_EV_CREATED;
374 queue_work(rport_event_queue, &rdata->event_work);
375}
376
377/**
378 * fc_rport_timeout() - Handler for the retry_work timer.
379 * @work: The work struct of the fc_rport_libfc_priv
380 *
381 * Locking Note: Called without the rport lock held. This
382 * function will hold the rport lock, call an _enter_*
383 * function and then unlock the rport.
384 */
385static void fc_rport_timeout(struct work_struct *work)
386{
387 struct fc_rport_libfc_priv *rdata =
388 container_of(work, struct fc_rport_libfc_priv, retry_work.work);
389 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
390
391 mutex_lock(&rdata->rp_mutex);
392
393 switch (rdata->rp_state) {
394 case RPORT_ST_PLOGI:
395 fc_rport_enter_plogi(rport);
396 break;
397 case RPORT_ST_PRLI:
398 fc_rport_enter_prli(rport);
399 break;
400 case RPORT_ST_RTV:
401 fc_rport_enter_rtv(rport);
402 break;
403 case RPORT_ST_LOGO:
404 fc_rport_enter_logo(rport);
405 break;
406 case RPORT_ST_READY:
407 case RPORT_ST_INIT:
408 case RPORT_ST_DELETE:
409 break;
410 }
411
412 mutex_unlock(&rdata->rp_mutex);
413 put_device(&rport->dev);
414}
415
416/**
417 * fc_rport_error() - Error handler, called once retries have been exhausted
418 * @rport: The fc_rport object
419 * @fp: The frame pointer
420 *
421 * Locking Note: The rport lock is expected to be held before
422 * calling this routine
423 */
424static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
425{
426 struct fc_rport_libfc_priv *rdata = rport->dd_data;
427
428 FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n",
429 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
430
431 switch (rdata->rp_state) {
432 case RPORT_ST_PLOGI:
433 case RPORT_ST_PRLI:
434 case RPORT_ST_LOGO:
435 rdata->event = RPORT_EV_FAILED;
436 fc_rport_state_enter(rport, RPORT_ST_DELETE);
437 queue_work(rport_event_queue,
438 &rdata->event_work);
439 break;
440 case RPORT_ST_RTV:
441 fc_rport_enter_ready(rport);
442 break;
443 case RPORT_ST_DELETE:
444 case RPORT_ST_READY:
445 case RPORT_ST_INIT:
446 break;
447 }
448}
449
450/**
451 * fc_rport_error_retry() - Error handler when retries are desired
452 * @rport: The fc_rport object
453 * @fp: The frame pointer
454 *
455 * If the error was an exchange timeout retry immediately,
456 * otherwise wait for E_D_TOV.
457 *
458 * Locking Note: The rport lock is expected to be held before
459 * calling this routine
460 */
461static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
462{
463 struct fc_rport_libfc_priv *rdata = rport->dd_data;
464 unsigned long delay = FC_DEF_E_D_TOV;
465
466 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
467 if (PTR_ERR(fp) == -FC_EX_CLOSED)
468 return fc_rport_error(rport, fp);
469
470 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
471 FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n",
472 PTR_ERR(fp), fc_rport_state(rport));
473 rdata->retries++;
474 /* no additional delay on exchange timeouts */
475 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
476 delay = 0;
477 get_device(&rport->dev);
478 schedule_delayed_work(&rdata->retry_work, delay);
479 return;
480 }
481
482 return fc_rport_error(rport, fp);
483}
484
485/**
486 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
487 * @sp: current sequence in the PLOGI exchange
488 * @fp: response frame
489 * @rp_arg: Fibre Channel remote port
490 *
491 * Locking Note: This function will be called without the rport lock
492 * held, but it will lock, call an _enter_* function or fc_rport_error
493 * and then unlock the rport.
494 */
495static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
496 void *rp_arg)
497{
498 struct fc_rport *rport = rp_arg;
499 struct fc_rport_libfc_priv *rdata = rport->dd_data;
500 struct fc_lport *lport = rdata->local_port;
501 struct fc_els_flogi *plp = NULL;
502 unsigned int tov;
503 u16 csp_seq;
504 u16 cssp_seq;
505 u8 op;
506
507 mutex_lock(&rdata->rp_mutex);
508
509 FC_RPORT_DBG(rport, "Received a PLOGI response\n");
510
511 if (rdata->rp_state != RPORT_ST_PLOGI) {
512 FC_RPORT_DBG(rport, "Received a PLOGI response, but in state "
513 "%s\n", fc_rport_state(rport));
514 if (IS_ERR(fp))
515 goto err;
516 goto out;
517 }
518
519 if (IS_ERR(fp)) {
520 fc_rport_error_retry(rport, fp);
521 goto err;
522 }
523
524 op = fc_frame_payload_op(fp);
525 if (op == ELS_LS_ACC &&
526 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
527 rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
528 rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
529
530 tov = ntohl(plp->fl_csp.sp_e_d_tov);
531 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
532 tov /= 1000;
533 if (tov > rdata->e_d_tov)
534 rdata->e_d_tov = tov;
535 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
536 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
537 if (cssp_seq < csp_seq)
538 csp_seq = cssp_seq;
539 rdata->max_seq = csp_seq;
540 rport->maxframe_size =
541 fc_plogi_get_maxframe(plp, lport->mfs);
542
543 /*
544 * If the rport is one of the well known addresses
545 * we skip PRLI and RTV and go straight to READY.
546 */
547 if (rport->port_id >= FC_FID_DOM_MGR)
548 fc_rport_enter_ready(rport);
549 else
550 fc_rport_enter_prli(rport);
551 } else
552 fc_rport_error_retry(rport, fp);
553
554out:
555 fc_frame_free(fp);
556err:
557 mutex_unlock(&rdata->rp_mutex);
558 put_device(&rport->dev);
559}
560
561/**
562 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
563 * @rport: Fibre Channel remote port to send PLOGI to
564 *
565 * Locking Note: The rport lock is expected to be held before calling
566 * this routine.
567 */
568static void fc_rport_enter_plogi(struct fc_rport *rport)
569{
570 struct fc_rport_libfc_priv *rdata = rport->dd_data;
571 struct fc_lport *lport = rdata->local_port;
572 struct fc_frame *fp;
573
574 FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n",
575 fc_rport_state(rport));
576
577 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
578
579 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
580 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
581 if (!fp) {
582 fc_rport_error_retry(rport, fp);
583 return;
584 }
585 rdata->e_d_tov = lport->e_d_tov;
586
587 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
588 fc_rport_plogi_resp, rport, lport->e_d_tov))
589 fc_rport_error_retry(rport, fp);
590 else
591 get_device(&rport->dev);
592}
593
594/**
595 * fc_rport_prli_resp() - Process Login (PRLI) response handler
596 * @sp: current sequence in the PRLI exchange
597 * @fp: response frame
598 * @rp_arg: Fibre Channel remote port
599 *
600 * Locking Note: This function will be called without the rport lock
601 * held, but it will lock, call an _enter_* function or fc_rport_error
602 * and then unlock the rport.
603 */
604static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
605 void *rp_arg)
606{
607 struct fc_rport *rport = rp_arg;
608 struct fc_rport_libfc_priv *rdata = rport->dd_data;
609 struct {
610 struct fc_els_prli prli;
611 struct fc_els_spp spp;
612 } *pp;
613 u32 roles = FC_RPORT_ROLE_UNKNOWN;
614 u32 fcp_parm = 0;
615 u8 op;
616
617 mutex_lock(&rdata->rp_mutex);
618
619 FC_RPORT_DBG(rport, "Received a PRLI response\n");
620
621 if (rdata->rp_state != RPORT_ST_PRLI) {
622 FC_RPORT_DBG(rport, "Received a PRLI response, but in state "
623 "%s\n", fc_rport_state(rport));
624 if (IS_ERR(fp))
625 goto err;
626 goto out;
627 }
628
629 if (IS_ERR(fp)) {
630 fc_rport_error_retry(rport, fp);
631 goto err;
632 }
633
634 op = fc_frame_payload_op(fp);
635 if (op == ELS_LS_ACC) {
636 pp = fc_frame_payload_get(fp, sizeof(*pp));
637 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
638 fcp_parm = ntohl(pp->spp.spp_params);
639 if (fcp_parm & FCP_SPPF_RETRY)
640 rdata->flags |= FC_RP_FLAGS_RETRY;
641 }
642
643 rport->supported_classes = FC_COS_CLASS3;
644 if (fcp_parm & FCP_SPPF_INIT_FCN)
645 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
646 if (fcp_parm & FCP_SPPF_TARG_FCN)
647 roles |= FC_RPORT_ROLE_FCP_TARGET;
648
649 rport->roles = roles;
650 fc_rport_enter_rtv(rport);
651
652 } else {
653 FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n");
654 rdata->event = RPORT_EV_FAILED;
655 fc_rport_state_enter(rport, RPORT_ST_DELETE);
656 queue_work(rport_event_queue, &rdata->event_work);
657 }
658
659out:
660 fc_frame_free(fp);
661err:
662 mutex_unlock(&rdata->rp_mutex);
663 put_device(&rport->dev);
664}
665
666/**
667 * fc_rport_logo_resp() - Logout (LOGO) response handler
668 * @sp: current sequence in the LOGO exchange
669 * @fp: response frame
670 * @rp_arg: Fibre Channel remote port
671 *
672 * Locking Note: This function will be called without the rport lock
673 * held, but it will lock, call an _enter_* function or fc_rport_error
674 * and then unlock the rport.
675 */
676static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
677 void *rp_arg)
678{
679 struct fc_rport *rport = rp_arg;
680 struct fc_rport_libfc_priv *rdata = rport->dd_data;
681 u8 op;
682
683 mutex_lock(&rdata->rp_mutex);
684
685 FC_RPORT_DBG(rport, "Received a LOGO response\n");
686
687 if (rdata->rp_state != RPORT_ST_LOGO) {
688 FC_RPORT_DBG(rport, "Received a LOGO response, but in state "
689 "%s\n", fc_rport_state(rport));
690 if (IS_ERR(fp))
691 goto err;
692 goto out;
693 }
694
695 if (IS_ERR(fp)) {
696 fc_rport_error_retry(rport, fp);
697 goto err;
698 }
699
700 op = fc_frame_payload_op(fp);
701 if (op == ELS_LS_ACC) {
702 fc_rport_enter_rtv(rport);
703 } else {
704 FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n");
705 rdata->event = RPORT_EV_LOGO;
706 fc_rport_state_enter(rport, RPORT_ST_DELETE);
707 queue_work(rport_event_queue, &rdata->event_work);
708 }
709
710out:
711 fc_frame_free(fp);
712err:
713 mutex_unlock(&rdata->rp_mutex);
714 put_device(&rport->dev);
715}
716
717/**
718 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
719 * @rport: Fibre Channel remote port to send PRLI to
720 *
721 * Locking Note: The rport lock is expected to be held before calling
722 * this routine.
723 */
724static void fc_rport_enter_prli(struct fc_rport *rport)
725{
726 struct fc_rport_libfc_priv *rdata = rport->dd_data;
727 struct fc_lport *lport = rdata->local_port;
728 struct {
729 struct fc_els_prli prli;
730 struct fc_els_spp spp;
731 } *pp;
732 struct fc_frame *fp;
733
734 FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n",
735 fc_rport_state(rport));
736
737 fc_rport_state_enter(rport, RPORT_ST_PRLI);
738
739 fp = fc_frame_alloc(lport, sizeof(*pp));
740 if (!fp) {
741 fc_rport_error_retry(rport, fp);
742 return;
743 }
744
745 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
746 fc_rport_prli_resp, rport, lport->e_d_tov))
747 fc_rport_error_retry(rport, fp);
748 else
749 get_device(&rport->dev);
750}
751
752/**
753 * fc_rport_rtv_resp() - Request Timeout Value response handler
754 * @sp: current sequence in the RTV exchange
755 * @fp: response frame
756 * @rp_arg: Fibre Channel remote port
757 *
758 * Many targets don't seem to support this.
759 *
760 * Locking Note: This function will be called without the rport lock
761 * held, but it will lock, call an _enter_* function or fc_rport_error
762 * and then unlock the rport.
763 */
764static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
765 void *rp_arg)
766{
767 struct fc_rport *rport = rp_arg;
768 struct fc_rport_libfc_priv *rdata = rport->dd_data;
769 u8 op;
770
771 mutex_lock(&rdata->rp_mutex);
772
773 FC_RPORT_DBG(rport, "Received a RTV response\n");
774
775 if (rdata->rp_state != RPORT_ST_RTV) {
776 FC_RPORT_DBG(rport, "Received a RTV response, but in state "
777 "%s\n", fc_rport_state(rport));
778 if (IS_ERR(fp))
779 goto err;
780 goto out;
781 }
782
783 if (IS_ERR(fp)) {
784 fc_rport_error(rport, fp);
785 goto err;
786 }
787
788 op = fc_frame_payload_op(fp);
789 if (op == ELS_LS_ACC) {
790 struct fc_els_rtv_acc *rtv;
791 u32 toq;
792 u32 tov;
793
794 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
795 if (rtv) {
796 toq = ntohl(rtv->rtv_toq);
797 tov = ntohl(rtv->rtv_r_a_tov);
798 if (tov == 0)
799 tov = 1;
800 rdata->r_a_tov = tov;
801 tov = ntohl(rtv->rtv_e_d_tov);
802 if (toq & FC_ELS_RTV_EDRES)
803 tov /= 1000000;
804 if (tov == 0)
805 tov = 1;
806 rdata->e_d_tov = tov;
807 }
808 }
809
810 fc_rport_enter_ready(rport);
811
812out:
813 fc_frame_free(fp);
814err:
815 mutex_unlock(&rdata->rp_mutex);
816 put_device(&rport->dev);
817}
818
819/**
820 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
821 * @rport: Fibre Channel remote port to send RTV to
822 *
823 * Locking Note: The rport lock is expected to be held before calling
824 * this routine.
825 */
826static void fc_rport_enter_rtv(struct fc_rport *rport)
827{
828 struct fc_frame *fp;
829 struct fc_rport_libfc_priv *rdata = rport->dd_data;
830 struct fc_lport *lport = rdata->local_port;
831
832 FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n",
833 fc_rport_state(rport));
834
835 fc_rport_state_enter(rport, RPORT_ST_RTV);
836
837 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
838 if (!fp) {
839 fc_rport_error_retry(rport, fp);
840 return;
841 }
842
843 if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
844 fc_rport_rtv_resp, rport, lport->e_d_tov))
845 fc_rport_error_retry(rport, fp);
846 else
847 get_device(&rport->dev);
848}
849
850/**
851 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
852 * @rport: Fibre Channel remote port to send LOGO to
853 *
854 * Locking Note: The rport lock is expected to be held before calling
855 * this routine.
856 */
857static void fc_rport_enter_logo(struct fc_rport *rport)
858{
859 struct fc_rport_libfc_priv *rdata = rport->dd_data;
860 struct fc_lport *lport = rdata->local_port;
861 struct fc_frame *fp;
862
863 FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n",
864 fc_rport_state(rport));
865
866 fc_rport_state_enter(rport, RPORT_ST_LOGO);
867
868 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
869 if (!fp) {
870 fc_rport_error_retry(rport, fp);
871 return;
872 }
873
874 if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
875 fc_rport_logo_resp, rport, lport->e_d_tov))
876 fc_rport_error_retry(rport, fp);
877 else
878 get_device(&rport->dev);
879}
880
881
882/**
883 * fc_rport_recv_req() - Receive a request from a rport
884 * @sp: current sequence in the exchange
885 * @fp: request frame
886 * @rport: Fibre Channel remote port
887 *
888 * Locking Note: Called without the rport lock held. This
889 * function will hold the rport lock, call an _enter_*
890 * function and then unlock the rport.
891 */
892void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
893 struct fc_rport *rport)
894{
895 struct fc_rport_libfc_priv *rdata = rport->dd_data;
896 struct fc_lport *lport = rdata->local_port;
897
898 struct fc_frame_header *fh;
899 struct fc_seq_els_data els_data;
900 u8 op;
901
902 mutex_lock(&rdata->rp_mutex);
903
904 els_data.fp = NULL;
905 els_data.explan = ELS_EXPL_NONE;
906 els_data.reason = ELS_RJT_NONE;
907
908 fh = fc_frame_header_get(fp);
909
910 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
911 op = fc_frame_payload_op(fp);
912 switch (op) {
913 case ELS_PLOGI:
914 fc_rport_recv_plogi_req(rport, sp, fp);
915 break;
916 case ELS_PRLI:
917 fc_rport_recv_prli_req(rport, sp, fp);
918 break;
919 case ELS_PRLO:
920 fc_rport_recv_prlo_req(rport, sp, fp);
921 break;
922 case ELS_LOGO:
923 fc_rport_recv_logo_req(rport, sp, fp);
924 break;
925 case ELS_RRQ:
926 els_data.fp = fp;
927 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
928 break;
929 case ELS_REC:
930 els_data.fp = fp;
931 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
932 break;
933 default:
934 els_data.reason = ELS_RJT_UNSUP;
935 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
936 break;
937 }
938 }
939
940 mutex_unlock(&rdata->rp_mutex);
941}
942
943/**
944 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
945 * @rport: Fibre Channel remote port that initiated PLOGI
946 * @sp: current sequence in the PLOGI exchange
947 * @fp: PLOGI request frame
948 *
949 * Locking Note: The rport lock is expected to be held before calling
950 * this function.
951 */
952static void fc_rport_recv_plogi_req(struct fc_rport *rport,
953 struct fc_seq *sp, struct fc_frame *rx_fp)
954{
955 struct fc_rport_libfc_priv *rdata = rport->dd_data;
956 struct fc_lport *lport = rdata->local_port;
957 struct fc_frame *fp = rx_fp;
958 struct fc_exch *ep;
959 struct fc_frame_header *fh;
960 struct fc_els_flogi *pl;
961 struct fc_seq_els_data rjt_data;
962 u32 sid;
963 u64 wwpn;
964 u64 wwnn;
965 enum fc_els_rjt_reason reject = 0;
966 u32 f_ctl;
967 rjt_data.fp = NULL;
968
969 fh = fc_frame_header_get(fp);
970
971 FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n",
972 fc_rport_state(rport));
973
974 sid = ntoh24(fh->fh_s_id);
975 pl = fc_frame_payload_get(fp, sizeof(*pl));
976 if (!pl) {
977 FC_RPORT_DBG(rport, "Received PLOGI too short\n");
978 WARN_ON(1);
979 /* XXX TBD: send reject? */
980 fc_frame_free(fp);
981 return;
982 }
983 wwpn = get_unaligned_be64(&pl->fl_wwpn);
984 wwnn = get_unaligned_be64(&pl->fl_wwnn);
985
986 /*
987 * If the session was just created, possibly due to the incoming PLOGI,
988 * set the state appropriately and accept the PLOGI.
989 *
990 * If we had also sent a PLOGI, and if the received PLOGI is from a
991 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
992 * "command already in progress".
993 *
994 * XXX TBD: If the session was ready before, the PLOGI should result in
995 * all outstanding exchanges being reset.
996 */
997 switch (rdata->rp_state) {
998 case RPORT_ST_INIT:
999 FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT "
1000 "- reject\n", (unsigned long long)wwpn);
1001 reject = ELS_RJT_UNSUP;
1002 break;
1003 case RPORT_ST_PLOGI:
1004 FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n",
1005 rdata->rp_state);
1006 if (wwpn < lport->wwpn)
1007 reject = ELS_RJT_INPROG;
1008 break;
1009 case RPORT_ST_PRLI:
1010 case RPORT_ST_READY:
1011 FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d "
1012 "- ignored for now\n", rdata->rp_state);
1013 /* XXX TBD - should reset */
1014 break;
1015 case RPORT_ST_DELETE:
1016 default:
1017 FC_RPORT_DBG(rport, "Received PLOGI in unexpected "
1018 "state %d\n", rdata->rp_state);
1019 fc_frame_free(fp);
1020 return;
1021 break;
1022 }
1023
1024 if (reject) {
1025 rjt_data.reason = reject;
1026 rjt_data.explan = ELS_EXPL_NONE;
1027 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1028 fc_frame_free(fp);
1029 } else {
1030 fp = fc_frame_alloc(lport, sizeof(*pl));
1031 if (fp == NULL) {
1032 fp = rx_fp;
1033 rjt_data.reason = ELS_RJT_UNAB;
1034 rjt_data.explan = ELS_EXPL_NONE;
1035 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1036 fc_frame_free(fp);
1037 } else {
1038 sp = lport->tt.seq_start_next(sp);
1039 WARN_ON(!sp);
1040 fc_rport_set_name(rport, wwpn, wwnn);
1041
1042 /*
1043 * Get session payload size from incoming PLOGI.
1044 */
1045 rport->maxframe_size =
1046 fc_plogi_get_maxframe(pl, lport->mfs);
1047 fc_frame_free(rx_fp);
1048 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1049
1050 /*
1051 * Send LS_ACC. If this fails,
1052 * the originator should retry.
1053 */
1054 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1055 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1056 ep = fc_seq_exch(sp);
1057 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1058 FC_TYPE_ELS, f_ctl, 0);
1059 lport->tt.seq_send(lport, sp, fp);
1060 if (rdata->rp_state == RPORT_ST_PLOGI)
1061 fc_rport_enter_prli(rport);
1062 }
1063 }
1064}
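/*
 * Worked example of the PLOGI collision handling above (WWPNs assumed for
 * illustration): with the local rport in RPORT_ST_PLOGI, a local port whose
 * lport->wwpn is 0x20000000c9000001 accepts an incoming PLOGI whose wwpn is
 * 0x20000000c9000002 (higher), but rejects one whose wwpn is
 * 0x20000000c9000000 (lower) with ELS_RJT_INPROG ("command already in
 * progress").
 */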
1065
1066/**
1067 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1068 * @rport: Fibre Channel remote port that initiated PRLI
1069 * @sp: current sequence in the PRLI exchange
1070 * @fp: PRLI request frame
1071 *
1072 * Locking Note: The rport lock is expected to be held before calling
1073 * this function.
1074 */
1075static void fc_rport_recv_prli_req(struct fc_rport *rport,
1076 struct fc_seq *sp, struct fc_frame *rx_fp)
1077{
1078 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1079 struct fc_lport *lport = rdata->local_port;
1080 struct fc_exch *ep;
1081 struct fc_frame *fp;
1082 struct fc_frame_header *fh;
1083 struct {
1084 struct fc_els_prli prli;
1085 struct fc_els_spp spp;
1086 } *pp;
1087 struct fc_els_spp *rspp; /* request service param page */
1088 struct fc_els_spp *spp; /* response spp */
1089 unsigned int len;
1090 unsigned int plen;
1091 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1092 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1093 enum fc_els_spp_resp resp;
1094 struct fc_seq_els_data rjt_data;
1095 u32 f_ctl;
1096 u32 fcp_parm;
1097 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1098 rjt_data.fp = NULL;
1099
1100 fh = fc_frame_header_get(rx_fp);
1101
1102 FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n",
1103 fc_rport_state(rport));
1104
1105 switch (rdata->rp_state) {
1106 case RPORT_ST_PRLI:
1107 case RPORT_ST_READY:
1108 reason = ELS_RJT_NONE;
1109 break;
1110 default:
1111 fc_frame_free(rx_fp);
1112 return;
1113 break;
1114 }
1115 len = fr_len(rx_fp) - sizeof(*fh);
1116 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1117 if (pp == NULL) {
1118 reason = ELS_RJT_PROT;
1119 explan = ELS_EXPL_INV_LEN;
1120 } else {
1121 plen = ntohs(pp->prli.prli_len);
1122 if ((plen % 4) != 0 || plen > len) {
1123 reason = ELS_RJT_PROT;
1124 explan = ELS_EXPL_INV_LEN;
1125 } else if (plen < len) {
1126 len = plen;
1127 }
1128 plen = pp->prli.prli_spp_len;
1129 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1130 plen > len || len < sizeof(*pp)) {
1131 reason = ELS_RJT_PROT;
1132 explan = ELS_EXPL_INV_LEN;
1133 }
1134 rspp = &pp->spp;
1135 }
1136 if (reason != ELS_RJT_NONE ||
1137 (fp = fc_frame_alloc(lport, len)) == NULL) {
1138 rjt_data.reason = reason;
1139 rjt_data.explan = explan;
1140 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1141 } else {
1142 sp = lport->tt.seq_start_next(sp);
1143 WARN_ON(!sp);
1144 pp = fc_frame_payload_get(fp, len);
1145 WARN_ON(!pp);
1146 memset(pp, 0, len);
1147 pp->prli.prli_cmd = ELS_LS_ACC;
1148 pp->prli.prli_spp_len = plen;
1149 pp->prli.prli_len = htons(len);
1150 len -= sizeof(struct fc_els_prli);
1151
1152 /*
1153 * Go through all the service parameter pages and build
1154 * response. If plen indicates longer SPP than standard,
1155 * use that. The entire response has been pre-cleared above.
1156 */
1157 spp = &pp->spp;
1158 while (len >= plen) {
1159 spp->spp_type = rspp->spp_type;
1160 spp->spp_type_ext = rspp->spp_type_ext;
1161 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1162 resp = FC_SPP_RESP_ACK;
1163 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1164 resp = FC_SPP_RESP_NO_PA;
1165 switch (rspp->spp_type) {
1166 case 0: /* common to all FC-4 types */
1167 break;
1168 case FC_TYPE_FCP:
1169 fcp_parm = ntohl(rspp->spp_params);
1170 if (fcp_parm & FCP_SPPF_RETRY)
1171 rdata->flags |= FC_RP_FLAGS_RETRY;
1172 rport->supported_classes = FC_COS_CLASS3;
1173 if (fcp_parm & FCP_SPPF_INIT_FCN)
1174 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1175 if (fcp_parm & FCP_SPPF_TARG_FCN)
1176 roles |= FC_RPORT_ROLE_FCP_TARGET;
1177 rport->roles = roles;
1178
1179 spp->spp_params =
1180 htonl(lport->service_params);
1181 break;
1182 default:
1183 resp = FC_SPP_RESP_INVL;
1184 break;
1185 }
1186 spp->spp_flags |= resp;
1187 len -= plen;
1188 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1189 spp = (struct fc_els_spp *)((char *)spp + plen);
1190 }
1191
1192 /*
1193 * Send LS_ACC. If this fails, the originator should retry.
1194 */
1195 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1196 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1197 ep = fc_seq_exch(sp);
1198 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1199 FC_TYPE_ELS, f_ctl, 0);
1200 lport->tt.seq_send(lport, sp, fp);
1201
1202 /*
1203 * Get lock and re-check state.
1204 */
1205 switch (rdata->rp_state) {
1206 case RPORT_ST_PRLI:
1207 fc_rport_enter_ready(rport);
1208 break;
1209 case RPORT_ST_READY:
1210 break;
1211 default:
1212 break;
1213 }
1214 }
1215 fc_frame_free(rx_fp);
1216}
1217
1218/**
1219 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1220 * @rport: Fibre Channel remote port that initiated PRLO
1221 * @sp: current sequence in the PRLO exchange
1222 * @fp: PRLO request frame
1223 *
1224 * Locking Note: The rport lock is expected to be held before calling
1225 * this function.
1226 */
1227static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1228 struct fc_frame *fp)
1229{
1230 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1231 struct fc_lport *lport = rdata->local_port;
1232
1233 struct fc_frame_header *fh;
1234 struct fc_seq_els_data rjt_data;
1235
1236 fh = fc_frame_header_get(fp);
1237
1238 FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n",
1239 fc_rport_state(rport));
1240
1241 if (rdata->rp_state == RPORT_ST_DELETE) {
1242 fc_frame_free(fp);
1243 return;
1244 }
1245
1246 rjt_data.fp = NULL;
1247 rjt_data.reason = ELS_RJT_UNAB;
1248 rjt_data.explan = ELS_EXPL_NONE;
1249 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1250 fc_frame_free(fp);
1251}
1252
1253/**
1254 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1255 * @rport: Fibre Channel remote port that initiated LOGO
1256 * @sp: current sequence in the LOGO exchange
1257 * @fp: LOGO request frame
1258 *
1259 * Locking Note: The rport lock is expected to be held before calling
1260 * this function.
1261 */
1262static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
1263 struct fc_frame *fp)
1264{
1265 struct fc_frame_header *fh;
1266 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1267 struct fc_lport *lport = rdata->local_port;
1268
1269 fh = fc_frame_header_get(fp);
1270
1271 FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n",
1272 fc_rport_state(rport));
1273
1274 if (rdata->rp_state == RPORT_ST_DELETE) {
1275 fc_frame_free(fp);
1276 return;
1277 }
1278
1279 rdata->event = RPORT_EV_LOGO;
1280 fc_rport_state_enter(rport, RPORT_ST_DELETE);
1281 queue_work(rport_event_queue, &rdata->event_work);
1282
1283 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1284 fc_frame_free(fp);
1285}
1286
1287static void fc_rport_flush_queue(void)
1288{
1289 flush_workqueue(rport_event_queue);
1290}
1291
1292int fc_rport_init(struct fc_lport *lport)
1293{
1294 if (!lport->tt.rport_create)
1295 lport->tt.rport_create = fc_rport_rogue_create;
1296
1297 if (!lport->tt.rport_login)
1298 lport->tt.rport_login = fc_rport_login;
1299
1300 if (!lport->tt.rport_logoff)
1301 lport->tt.rport_logoff = fc_rport_logoff;
1302
1303 if (!lport->tt.rport_recv_req)
1304 lport->tt.rport_recv_req = fc_rport_recv_req;
1305
1306 if (!lport->tt.rport_flush_queue)
1307 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1308
1309 return 0;
1310}
1311EXPORT_SYMBOL(fc_rport_init);
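/*
 * Minimal usage sketch (assumed caller context, not part of this file): a
 * libfc user fills in any lport->tt handlers it wants to override, lets
 * fc_rport_init() plug in the defaults above, and then drives logins through
 * the template, e.g.:
 *
 *	fc_rport_init(lport);
 *	rport = lport->tt.rport_create(dp);	- dp is a struct fc_disc_port *
 *	lport->tt.rport_login(rport);		- starts the PLOGI state machine
 *	...
 *	lport->tt.rport_logoff(rport);		- sends LOGO and deletes the rport
 */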
1312
1313int fc_setup_rport(void)
1314{
1315 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1316 if (!rport_event_queue)
1317 return -ENOMEM;
1318 return 0;
1319}
1320EXPORT_SYMBOL(fc_setup_rport);
1321
1322void fc_destroy_rport(void)
1323{
1324 destroy_workqueue(rport_event_queue);
1325}
1326EXPORT_SYMBOL(fc_destroy_rport);
1327
1328void fc_rport_terminate_io(struct fc_rport *rport)
1329{
1330 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1331 struct fc_lport *lport = rdata->local_port;
1332
1333 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1334 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1335}
1336EXPORT_SYMBOL(fc_rport_terminate_io);