[SCSI] libfc: Remove the FC_EM_DBG macro
drivers/scsi/libfc/fc_exch.c (linux-2.6-block.git)
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22/*
23 * Fibre Channel exchange and sequence handling.
24 */
25
26#include <linux/timer.h>
27#include <linux/gfp.h>
28#include <linux/err.h>
29
30#include <scsi/fc/fc_fc2.h>
31
32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h>
34
 35static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
36
37/*
38 * Structure and function definitions for managing Fibre Channel Exchanges
39 * and Sequences.
40 *
41 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
42 *
43 * fc_exch_mgr holds the exchange state for an N port
44 *
45 * fc_exch holds state for one exchange and links to its active sequence.
46 *
47 * fc_seq holds the state for an individual sequence.
48 */
49
50/*
51 * Exchange manager.
52 *
53 * This structure is the center for creating exchanges and sequences.
54 * It manages the allocation of exchange IDs.
55 */
56struct fc_exch_mgr {
57 enum fc_class class; /* default class for sequences */
 58 struct kref kref; /* exchange mgr reference count */
59 spinlock_t em_lock; /* exchange manager lock,
60 must be taken before ex_lock */
61 u16 last_xid; /* last allocated exchange ID */
62 u16 min_xid; /* min exchange ID */
63 u16 max_xid; /* max exchange ID */
64 u16 max_read; /* max exchange ID for read */
65 u16 last_read; /* last xid allocated for read */
66 u32 total_exches; /* total allocated exchanges */
67 struct list_head ex_list; /* allocated exchanges list */
68 struct fc_lport *lp; /* fc device instance */
69 mempool_t *ep_pool; /* reserve ep's */
70
71 /*
72 * currently exchange mgr stats are updated but not used.
 73 * XXX: either expose the stats via sysfs or remove them
 74 * altogether if they stay unused.
75 */
76 struct {
77 atomic_t no_free_exch;
78 atomic_t no_free_exch_xid;
79 atomic_t xid_not_found;
80 atomic_t xid_busy;
81 atomic_t seq_not_found;
82 atomic_t non_bls_resp;
83 } stats;
84 struct fc_exch **exches; /* for exch pointers indexed by xid */
85};
86#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
87
88struct fc_exch_mgr_anchor {
89 struct list_head ema_list;
90 struct fc_exch_mgr *mp;
91 bool (*match)(struct fc_frame *);
92};
93
94static void fc_exch_rrq(struct fc_exch *);
95static void fc_seq_ls_acc(struct fc_seq *);
96static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
97 enum fc_els_rjt_explan);
98static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
99static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
100static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
101
102/*
103 * Internal implementation notes.
104 *
 105 * By default there is one exchange manager in libfc, but the LLD may
 106 * choose to have one per CPU. The sequence manager is one per exchange
 107 * manager and is currently never separated.
108 *
109 * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
110 * assigned by the Sequence Initiator that shall be unique for a specific
111 * D_ID and S_ID pair while the Sequence is open." Note that it isn't
112 * qualified by exchange ID, which one might think it would be.
113 * In practice this limits the number of open sequences and exchanges to 256
114 * per session. For most targets we could treat this limit as per exchange.
115 *
116 * The exchange and its sequence are freed when the last sequence is received.
117 * It's possible for the remote port to leave an exchange open without
118 * sending any sequences.
119 *
120 * Notes on reference counts:
121 *
 122 * Exchanges are reference counted and an exchange is freed when its
 123 * reference count becomes zero.
124 *
125 * Timeouts:
126 * Sequences are timed out for E_D_TOV and R_A_TOV.
127 *
128 * Sequence event handling:
129 *
130 * The following events may occur on initiator sequences:
131 *
132 * Send.
133 * For now, the whole thing is sent.
134 * Receive ACK
135 * This applies only to class F.
136 * The sequence is marked complete.
137 * ULP completion.
138 * The upper layer calls fc_exch_done() when done
139 * with exchange and sequence tuple.
140 * RX-inferred completion.
141 * When we receive the next sequence on the same exchange, we can
142 * retire the previous sequence ID. (XXX not implemented).
143 * Timeout.
144 * R_A_TOV frees the sequence ID. If we're waiting for ACK,
145 * E_D_TOV causes abort and calls upper layer response handler
146 * with FC_EX_TIMEOUT error.
147 * Receive RJT
148 * XXX defer.
149 * Send ABTS
150 * On timeout.
151 *
152 * The following events may occur on recipient sequences:
153 *
154 * Receive
155 * Allocate sequence for first frame received.
156 * Hold during receive handler.
157 * Release when final frame received.
158 * Keep status of last N of these for the ELS RES command. XXX TBD.
159 * Receive ABTS
160 * Deallocate sequence
161 * Send RJT
162 * Deallocate
163 *
164 * For now, we neglect conditions where only part of a sequence was
165 * received or transmitted, or where out-of-order receipt is detected.
166 */
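/*
 * Illustrative sketch (not part of this file): roughly how an upper-layer
 * protocol might drive the exchange layer described above. The example_*
 * names are hypothetical; only the libfc calls shown are real.
 */
#if 0
static void example_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	if (IS_ERR(fp))
		return;		/* e.g. -FC_EX_TIMEOUT, per the timeout notes */
	/* ... consume the reply payload here ... */
	fc_frame_free(fp);
	fc_exch_done(sp);	/* ULP completion: release exchange/sequence */
}

static int example_send(struct fc_lport *lp, struct fc_frame *fp)
{
	/* allocate an exchange and send fp as its first sequence */
	if (!lp->tt.exch_seq_send(lp, fp, example_resp, NULL, NULL,
				  lp->e_d_tov))
		return -ENOMEM;
	return 0;
}
#endif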
167
168/*
169 * Locking notes:
170 *
 171 * The EM code runs in a per-CPU worker thread.
 172 *
 173 * To protect against concurrency between worker thread code and timers,
 174 * sequence allocation and deallocation must be locked.
 175 * - the exchange refcnt can be manipulated atomically, without locks.
176 * - sequence allocation must be locked by exch lock.
177 * - If the em_lock and ex_lock must be taken at the same time, then the
178 * em_lock must be taken before the ex_lock.
179 */
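/*
 * Minimal sketch of the lock ordering described above (illustrative only):
 * when both locks are needed, em_lock is always taken before ex_lock.
 */
#if 0
	spin_lock_bh(&mp->em_lock);	/* exchange manager lock first */
	spin_lock_bh(&ep->ex_lock);	/* then the per-exchange lock */
	/* ... update mp->exches[] and per-exchange state ... */
	spin_unlock_bh(&ep->ex_lock);
	spin_unlock_bh(&mp->em_lock);
#endif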
180
181/*
182 * opcode names for debugging.
183 */
184static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
185
186#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
187
188static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
189 unsigned int max_index)
190{
191 const char *name = NULL;
192
193 if (op < max_index)
194 name = table[op];
195 if (!name)
196 name = "unknown";
197 return name;
198}
199
200static const char *fc_exch_rctl_name(unsigned int op)
201{
202 return fc_exch_name_lookup(op, fc_exch_rctl_names,
203 FC_TABLE_SIZE(fc_exch_rctl_names));
204}
205
206/*
207 * Hold an exchange - keep it from being freed.
208 */
209static void fc_exch_hold(struct fc_exch *ep)
210{
211 atomic_inc(&ep->ex_refcnt);
212}
213
214/*
 215 * set up the fc hdr by initializing a few more FC header fields and sof/eof.
 216 * Fields initialized by this function:
217 * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
218 * - sof and eof
219 */
220static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
221 u32 f_ctl)
222{
223 struct fc_frame_header *fh = fc_frame_header_get(fp);
224 u16 fill;
225
226 fr_sof(fp) = ep->class;
227 if (ep->seq.cnt)
228 fr_sof(fp) = fc_sof_normal(ep->class);
229
230 if (f_ctl & FC_FC_END_SEQ) {
231 fr_eof(fp) = FC_EOF_T;
232 if (fc_sof_needs_ack(ep->class))
233 fr_eof(fp) = FC_EOF_N;
234 /*
235 * Form f_ctl.
236 * The number of fill bytes to make the length a 4-byte
237 * multiple is the low order 2-bits of the f_ctl.
238 * The fill itself will have been cleared by the frame
239 * allocation.
240 * After this, the length will be even, as expected by
241 * the transport.
242 */
243 fill = fr_len(fp) & 3;
244 if (fill) {
245 fill = 4 - fill;
246 /* TODO, this may be a problem with fragmented skb */
247 skb_put(fp_skb(fp), fill);
248 hton24(fh->fh_f_ctl, f_ctl | fill);
249 }
250 } else {
 251 WARN_ON(fr_len(fp) % 4 != 0); /* no pad on a non-last frame */
252 fr_eof(fp) = FC_EOF_N;
253 }
254
255 /*
 256 * Initialize the remaining fh fields
257 * from fc_fill_fc_hdr
258 */
259 fh->fh_ox_id = htons(ep->oxid);
260 fh->fh_rx_id = htons(ep->rxid);
261 fh->fh_seq_id = ep->seq.id;
262 fh->fh_seq_cnt = htons(ep->seq.cnt);
263}
264
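/*
 * Worked example of the fill computation in fc_exch_setup_hdr() above
 * (illustrative numbers only): a frame of 29 bytes has fr_len(fp) & 3 == 1,
 * so fill = 4 - 1 = 3 pad bytes are appended, the low two bits of f_ctl are
 * set to 3, and the frame becomes 32 bytes, a 4-byte multiple.
 */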
265
266/*
267 * Release a reference to an exchange.
268 * If the refcnt goes to zero and the exchange is complete, it is freed.
269 */
270static void fc_exch_release(struct fc_exch *ep)
271{
272 struct fc_exch_mgr *mp;
273
274 if (atomic_dec_and_test(&ep->ex_refcnt)) {
275 mp = ep->em;
276 if (ep->destructor)
277 ep->destructor(&ep->seq, ep->arg);
278 if (ep->lp->tt.exch_put)
279 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
 280 WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
281 mempool_free(ep, mp->ep_pool);
282 }
283}
284
285static int fc_exch_done_locked(struct fc_exch *ep)
286{
287 int rc = 1;
288
289 /*
290 * We must check for completion in case there are two threads
 291 * trying to complete this. But the rrq code will reuse the
292 * ep, and in that case we only clear the resp and set it as
293 * complete, so it can be reused by the timer to send the rrq.
294 */
295 ep->resp = NULL;
296 if (ep->state & FC_EX_DONE)
297 return rc;
298 ep->esb_stat |= ESB_ST_COMPLETE;
299
300 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
301 ep->state |= FC_EX_DONE;
302 if (cancel_delayed_work(&ep->timeout_work))
303 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
304 rc = 0;
305 }
306 return rc;
307}
308
309static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
310{
311 struct fc_exch_mgr *mp;
312
313 mp = ep->em;
314 spin_lock_bh(&mp->em_lock);
315 WARN_ON(mp->total_exches <= 0);
316 mp->total_exches--;
317 mp->exches[ep->xid - mp->min_xid] = NULL;
318 list_del(&ep->ex_list);
319 spin_unlock_bh(&mp->em_lock);
320 fc_exch_release(ep); /* drop hold for exch in mp */
321}
322
323/*
324 * Internal version of fc_exch_timer_set - used with lock held.
325 */
326static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
327 unsigned int timer_msec)
328{
329 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
330 return;
331
 332 FC_EXCH_DBG(ep, "Exchange timer armed\n");
333
334 if (schedule_delayed_work(&ep->timeout_work,
335 msecs_to_jiffies(timer_msec)))
336 fc_exch_hold(ep); /* hold for timer */
337}
338
339/*
340 * Set timer for an exchange.
341 * The time is a minimum delay in milliseconds until the timer fires.
342 * Used for upper level protocols to time out the exchange.
343 * The timer is cancelled when it fires or when the exchange completes.
 344 * No timer is set if the exchange is already complete or being reset.
345 */
346static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
347{
348 spin_lock_bh(&ep->ex_lock);
349 fc_exch_timer_set_locked(ep, timer_msec);
350 spin_unlock_bh(&ep->ex_lock);
351}
352
353int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
354{
355 struct fc_seq *sp;
356 struct fc_exch *ep;
357 struct fc_frame *fp;
358 int error;
359
360 ep = fc_seq_exch(req_sp);
361
362 spin_lock_bh(&ep->ex_lock);
363 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
364 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
365 spin_unlock_bh(&ep->ex_lock);
366 return -ENXIO;
367 }
368
369 /*
370 * Send the abort on a new sequence if possible.
371 */
372 sp = fc_seq_start_next_locked(&ep->seq);
373 if (!sp) {
374 spin_unlock_bh(&ep->ex_lock);
375 return -ENOMEM;
376 }
377
378 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
379 if (timer_msec)
380 fc_exch_timer_set_locked(ep, timer_msec);
381 spin_unlock_bh(&ep->ex_lock);
382
383 /*
384 * If not logged into the fabric, don't send ABTS but leave
385 * sequence active until next timeout.
386 */
387 if (!ep->sid)
388 return 0;
389
390 /*
391 * Send an abort for the sequence that timed out.
392 */
393 fp = fc_frame_alloc(ep->lp, 0);
394 if (fp) {
395 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
396 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
397 error = fc_seq_send(ep->lp, sp, fp);
398 } else
399 error = -ENOBUFS;
400 return error;
401}
402EXPORT_SYMBOL(fc_seq_exch_abort);
403
404/*
405 * Exchange timeout - handle exchange timer expiration.
406 * The timer will have been cancelled before this is called.
407 */
408static void fc_exch_timeout(struct work_struct *work)
409{
410 struct fc_exch *ep = container_of(work, struct fc_exch,
411 timeout_work.work);
412 struct fc_seq *sp = &ep->seq;
413 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
414 void *arg;
415 u32 e_stat;
416 int rc = 1;
417
418 spin_lock_bh(&ep->ex_lock);
419 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
420 goto unlock;
421
422 e_stat = ep->esb_stat;
423 if (e_stat & ESB_ST_COMPLETE) {
424 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
 425 spin_unlock_bh(&ep->ex_lock);
426 if (e_stat & ESB_ST_REC_QUAL)
427 fc_exch_rrq(ep);
428 goto done;
429 } else {
430 resp = ep->resp;
431 arg = ep->arg;
432 ep->resp = NULL;
433 if (e_stat & ESB_ST_ABNORMAL)
434 rc = fc_exch_done_locked(ep);
435 spin_unlock_bh(&ep->ex_lock);
436 if (!rc)
437 fc_exch_mgr_delete_ep(ep);
438 if (resp)
439 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
440 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
441 goto done;
442 }
443unlock:
444 spin_unlock_bh(&ep->ex_lock);
445done:
446 /*
447 * This release matches the hold taken when the timer was set.
448 */
449 fc_exch_release(ep);
450}
451
452/*
453 * Allocate a sequence.
454 *
455 * We don't support multiple originated sequences on the same exchange.
456 * By implication, any previously originated sequence on this exchange
457 * is complete, and we reallocate the same sequence.
458 */
459static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
460{
461 struct fc_seq *sp;
462
463 sp = &ep->seq;
464 sp->ssb_stat = 0;
465 sp->cnt = 0;
466 sp->id = seq_id;
467 return sp;
468}
469
470/*
471 * fc_em_alloc_xid - returns an xid based on request type
 472 * @mp : ptr to the associated exchange manager
 473 * @fp : ptr to the associated frame
 474 *
 475 * check the associated fc_fsp_pkt to get the SCSI command type and
 476 * direction, to decide from which range this exchange id
 477 * will be allocated.
 478 *
 479 * Returns : 0 or a valid xid
480 */
481static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
482{
483 u16 xid, min, max;
484 u16 *plast;
485 struct fc_exch *ep = NULL;
486
487 if (mp->max_read) {
 488 if (fc_fcp_is_read(fr_fsp(fp))) {
489 min = mp->min_xid;
490 max = mp->max_read;
491 plast = &mp->last_read;
492 } else {
493 min = mp->max_read + 1;
494 max = mp->max_xid;
495 plast = &mp->last_xid;
496 }
497 } else {
498 min = mp->min_xid;
499 max = mp->max_xid;
500 plast = &mp->last_xid;
501 }
502 xid = *plast;
503 do {
504 xid = (xid == max) ? min : xid + 1;
505 ep = mp->exches[xid - mp->min_xid];
506 } while ((ep != NULL) && (xid != *plast));
507
508 if (unlikely(ep))
509 xid = 0;
510 else
511 *plast = xid;
512
513 return xid;
514}
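/*
 * Worked example of the XID ranges used by fc_em_alloc_xid() above
 * (illustrative values only): with min_xid = 1, max_read = 0x7ff and
 * max_xid = 0xfff, read I/O exchanges are allocated from 1..0x7ff and all
 * other exchanges from 0x800..0xfff. Each range is searched circularly
 * starting after the last XID handed out, and 0 is returned if no free
 * XID is found in the range.
 */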
515
516/*
517 * fc_exch_alloc - allocate an exchange.
518 * @mp : ptr to the exchange manager
519 * @xid: input xid
520 *
 521 * If the supplied xid is zero then the next free exchange ID
 522 * from the exchange manager is assigned; otherwise the supplied xid is used.
523 * Returns with exch lock held.
524 */
525struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
526 struct fc_frame *fp, u16 xid)
527{
528 struct fc_exch *ep;
529
530 /* allocate memory for exchange */
531 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
532 if (!ep) {
533 atomic_inc(&mp->stats.no_free_exch);
534 goto out;
535 }
536 memset(ep, 0, sizeof(*ep));
537
538 spin_lock_bh(&mp->em_lock);
539 /* alloc xid if input xid 0 */
540 if (!xid) {
541 /* alloc a new xid */
542 xid = fc_em_alloc_xid(mp, fp);
543 if (!xid) {
 544 printk(KERN_WARNING "libfc: Failed to allocate an exchange\n");
545 goto err;
546 }
547 }
548
549 fc_exch_hold(ep); /* hold for exch in mp */
550 spin_lock_init(&ep->ex_lock);
551 /*
552 * Hold exch lock for caller to prevent fc_exch_reset()
553 * from releasing exch while fc_exch_alloc() caller is
554 * still working on exch.
555 */
556 spin_lock_bh(&ep->ex_lock);
557
558 mp->exches[xid - mp->min_xid] = ep;
559 list_add_tail(&ep->ex_list, &mp->ex_list);
560 fc_seq_alloc(ep, ep->seq_id++);
561 mp->total_exches++;
562 spin_unlock_bh(&mp->em_lock);
563
564 /*
565 * update exchange
566 */
567 ep->oxid = ep->xid = xid;
568 ep->em = mp;
569 ep->lp = mp->lp;
570 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
571 ep->rxid = FC_XID_UNKNOWN;
572 ep->class = mp->class;
573 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
574out:
575 return ep;
576err:
577 spin_unlock_bh(&mp->em_lock);
578 atomic_inc(&mp->stats.no_free_exch_xid);
579 mempool_free(ep, mp->ep_pool);
580 return NULL;
581}
582EXPORT_SYMBOL(fc_exch_alloc);
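/*
 * Illustrative sketch (not part of this file): allocating an exchange with
 * an automatically assigned XID. Note that fc_exch_alloc() returns with
 * ep->ex_lock held, so the caller drops it once setup is finished.
 */
#if 0
	struct fc_exch *ep;

	ep = fc_exch_alloc(mp, fp, 0);	/* xid == 0: pick the next free XID */
	if (ep) {
		/* ... fill in caller-specific exchange state ... */
		spin_unlock_bh(&ep->ex_lock);
	}
#endif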
583
584/*
585 * Lookup and hold an exchange.
586 */
587static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
588{
589 struct fc_exch *ep = NULL;
590
591 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
592 spin_lock_bh(&mp->em_lock);
593 ep = mp->exches[xid - mp->min_xid];
594 if (ep) {
595 fc_exch_hold(ep);
596 WARN_ON(ep->xid != xid);
597 }
598 spin_unlock_bh(&mp->em_lock);
599 }
600 return ep;
601}
602
603void fc_exch_done(struct fc_seq *sp)
604{
605 struct fc_exch *ep = fc_seq_exch(sp);
606 int rc;
607
608 spin_lock_bh(&ep->ex_lock);
609 rc = fc_exch_done_locked(ep);
610 spin_unlock_bh(&ep->ex_lock);
611 if (!rc)
612 fc_exch_mgr_delete_ep(ep);
613}
614EXPORT_SYMBOL(fc_exch_done);
615
616/*
617 * Allocate a new exchange as responder.
618 * Sets the responder ID in the frame header.
619 */
620static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
621{
622 struct fc_exch *ep;
623 struct fc_frame_header *fh;
624
625 ep = mp->lp->tt.exch_get(mp->lp, fp);
626 if (ep) {
627 ep->class = fc_frame_class(fp);
628
629 /*
630 * Set EX_CTX indicating we're responding on this exchange.
631 */
632 ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
633 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
634 fh = fc_frame_header_get(fp);
635 ep->sid = ntoh24(fh->fh_d_id);
636 ep->did = ntoh24(fh->fh_s_id);
637 ep->oid = ep->did;
638
639 /*
640 * Allocated exchange has placed the XID in the
641 * originator field. Move it to the responder field,
642 * and set the originator XID from the frame.
643 */
644 ep->rxid = ep->xid;
645 ep->oxid = ntohs(fh->fh_ox_id);
646 ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
647 if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
648 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
649
650 fc_exch_hold(ep); /* hold for caller */
651 spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */
652 }
653 return ep;
654}
655
656/*
657 * Find a sequence for receive where the other end is originating the sequence.
658 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
659 * on the ep that should be released by the caller.
660 */
661static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp,
662 struct fc_frame *fp)
663{
664 struct fc_frame_header *fh = fc_frame_header_get(fp);
665 struct fc_exch *ep = NULL;
666 struct fc_seq *sp = NULL;
667 enum fc_pf_rjt_reason reject = FC_RJT_NONE;
668 u32 f_ctl;
669 u16 xid;
670
671 f_ctl = ntoh24(fh->fh_f_ctl);
672 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
673
674 /*
675 * Lookup or create the exchange if we will be creating the sequence.
676 */
677 if (f_ctl & FC_FC_EX_CTX) {
678 xid = ntohs(fh->fh_ox_id); /* we originated exch */
679 ep = fc_exch_find(mp, xid);
680 if (!ep) {
681 atomic_inc(&mp->stats.xid_not_found);
682 reject = FC_RJT_OX_ID;
683 goto out;
684 }
685 if (ep->rxid == FC_XID_UNKNOWN)
686 ep->rxid = ntohs(fh->fh_rx_id);
687 else if (ep->rxid != ntohs(fh->fh_rx_id)) {
688 reject = FC_RJT_OX_ID;
689 goto rel;
690 }
691 } else {
692 xid = ntohs(fh->fh_rx_id); /* we are the responder */
693
694 /*
695 * Special case for MDS issuing an ELS TEST with a
696 * bad rxid of 0.
697 * XXX take this out once we do the proper reject.
698 */
699 if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
700 fc_frame_payload_op(fp) == ELS_TEST) {
701 fh->fh_rx_id = htons(FC_XID_UNKNOWN);
702 xid = FC_XID_UNKNOWN;
703 }
704
705 /*
706 * new sequence - find the exchange
707 */
708 ep = fc_exch_find(mp, xid);
709 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
710 if (ep) {
711 atomic_inc(&mp->stats.xid_busy);
712 reject = FC_RJT_RX_ID;
713 goto rel;
714 }
715 ep = fc_exch_resp(mp, fp);
716 if (!ep) {
717 reject = FC_RJT_EXCH_EST; /* XXX */
718 goto out;
719 }
720 xid = ep->xid; /* get our XID */
721 } else if (!ep) {
722 atomic_inc(&mp->stats.xid_not_found);
723 reject = FC_RJT_RX_ID; /* XID not found */
724 goto out;
725 }
726 }
727
728 /*
729 * At this point, we have the exchange held.
730 * Find or create the sequence.
731 */
732 if (fc_sof_is_init(fr_sof(fp))) {
733 sp = fc_seq_start_next(&ep->seq);
734 if (!sp) {
735 reject = FC_RJT_SEQ_XS; /* exchange shortage */
736 goto rel;
737 }
738 sp->id = fh->fh_seq_id;
739 sp->ssb_stat |= SSB_ST_RESP;
740 } else {
741 sp = &ep->seq;
742 if (sp->id != fh->fh_seq_id) {
743 atomic_inc(&mp->stats.seq_not_found);
744 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
745 goto rel;
746 }
747 }
748 WARN_ON(ep != fc_seq_exch(sp));
749
750 if (f_ctl & FC_FC_SEQ_INIT)
751 ep->esb_stat |= ESB_ST_SEQ_INIT;
752
753 fr_seq(fp) = sp;
754out:
755 return reject;
756rel:
757 fc_exch_done(&ep->seq);
758 fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
759 return reject;
760}
761
762/*
763 * Find the sequence for a frame being received.
764 * We originated the sequence, so it should be found.
765 * We may or may not have originated the exchange.
766 * Does not hold the sequence for the caller.
767 */
768static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
769 struct fc_frame *fp)
770{
771 struct fc_frame_header *fh = fc_frame_header_get(fp);
772 struct fc_exch *ep;
773 struct fc_seq *sp = NULL;
774 u32 f_ctl;
775 u16 xid;
776
777 f_ctl = ntoh24(fh->fh_f_ctl);
778 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
779 xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
780 ep = fc_exch_find(mp, xid);
781 if (!ep)
782 return NULL;
783 if (ep->seq.id == fh->fh_seq_id) {
784 /*
785 * Save the RX_ID if we didn't previously know it.
786 */
787 sp = &ep->seq;
788 if ((f_ctl & FC_FC_EX_CTX) != 0 &&
789 ep->rxid == FC_XID_UNKNOWN) {
790 ep->rxid = ntohs(fh->fh_rx_id);
791 }
792 }
793 fc_exch_release(ep);
794 return sp;
795}
796
797/*
798 * Set addresses for an exchange.
799 * Note this must be done before the first sequence of the exchange is sent.
800 */
801static void fc_exch_set_addr(struct fc_exch *ep,
802 u32 orig_id, u32 resp_id)
803{
804 ep->oid = orig_id;
805 if (ep->esb_stat & ESB_ST_RESP) {
806 ep->sid = resp_id;
807 ep->did = orig_id;
808 } else {
809 ep->sid = orig_id;
810 ep->did = resp_id;
811 }
812}
813
814static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
815{
816 struct fc_exch *ep = fc_seq_exch(sp);
817
818 sp = fc_seq_alloc(ep, ep->seq_id++);
819 FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
820 ep->f_ctl, sp->id);
821 return sp;
822}
823/*
824 * Allocate a new sequence on the same exchange as the supplied sequence.
825 * This will never return NULL.
826 */
827struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
828{
829 struct fc_exch *ep = fc_seq_exch(sp);
830
831 spin_lock_bh(&ep->ex_lock);
832 sp = fc_seq_start_next_locked(sp);
833 spin_unlock_bh(&ep->ex_lock);
834
835 return sp;
836}
837EXPORT_SYMBOL(fc_seq_start_next);
838
839int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
840{
841 struct fc_exch *ep;
842 struct fc_frame_header *fh = fc_frame_header_get(fp);
843 int error;
844 u32 f_ctl;
845
846 ep = fc_seq_exch(sp);
847 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
848
849 f_ctl = ntoh24(fh->fh_f_ctl);
850 fc_exch_setup_hdr(ep, fp, f_ctl);
851
852 /*
 853 * update the sequence count if this frame carries
 854 * multiple FC frames, i.e. when sequence offload is
 855 * enabled by the LLD.
856 */
857 if (fr_max_payload(fp))
858 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
859 fr_max_payload(fp));
860 else
861 sp->cnt++;
862
863 /*
864 * Send the frame.
865 */
866 error = lp->tt.frame_send(lp, fp);
867
868 /*
869 * Update the exchange and sequence flags,
870 * assuming all frames for the sequence have been sent.
871 * We can only be called to send once for each sequence.
872 */
873 spin_lock_bh(&ep->ex_lock);
874 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
875 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
876 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
877 spin_unlock_bh(&ep->ex_lock);
878 return error;
879}
880EXPORT_SYMBOL(fc_seq_send);
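/*
 * Worked example of the sequence count update in fc_seq_send() above
 * (illustrative numbers only): with fr_max_payload(fp) == 2048 and a frame
 * carrying 5000 bytes beyond the FC header, sp->cnt advances by
 * DIV_ROUND_UP(5000, 2048) == 3, one per frame the LLD will actually emit;
 * without sequence offload it simply advances by one.
 */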
881
882void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
883 struct fc_seq_els_data *els_data)
884{
885 switch (els_cmd) {
886 case ELS_LS_RJT:
887 fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
888 break;
889 case ELS_LS_ACC:
890 fc_seq_ls_acc(sp);
891 break;
892 case ELS_RRQ:
893 fc_exch_els_rrq(sp, els_data->fp);
894 break;
895 case ELS_REC:
896 fc_exch_els_rec(sp, els_data->fp);
897 break;
898 default:
 899 FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
900 }
901}
902EXPORT_SYMBOL(fc_seq_els_rsp_send);
903
904/*
905 * Send a sequence, which is also the last sequence in the exchange.
906 */
907static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
908 enum fc_rctl rctl, enum fc_fh_type fh_type)
909{
910 u32 f_ctl;
911 struct fc_exch *ep = fc_seq_exch(sp);
912
913 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
914 f_ctl |= ep->f_ctl;
915 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
916 fc_seq_send(ep->lp, sp, fp);
917}
918
919/*
920 * Send ACK_1 (or equiv.) indicating we received something.
921 * The frame we're acking is supplied.
922 */
923static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
924{
925 struct fc_frame *fp;
926 struct fc_frame_header *rx_fh;
927 struct fc_frame_header *fh;
928 struct fc_exch *ep = fc_seq_exch(sp);
929 struct fc_lport *lp = ep->lp;
930 unsigned int f_ctl;
931
932 /*
933 * Don't send ACKs for class 3.
934 */
935 if (fc_sof_needs_ack(fr_sof(rx_fp))) {
936 fp = fc_frame_alloc(lp, 0);
937 if (!fp)
938 return;
939
940 fh = fc_frame_header_get(fp);
941 fh->fh_r_ctl = FC_RCTL_ACK_1;
942 fh->fh_type = FC_TYPE_BLS;
943
944 /*
945 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
946 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
947 * Bits 9-8 are meaningful (retransmitted or unidirectional).
948 * Last ACK uses bits 7-6 (continue sequence),
949 * bits 5-4 are meaningful (what kind of ACK to use).
950 */
951 rx_fh = fc_frame_header_get(rx_fp);
952 f_ctl = ntoh24(rx_fh->fh_f_ctl);
953 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
954 FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
955 FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
956 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
957 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
958 hton24(fh->fh_f_ctl, f_ctl);
959
960 fc_exch_setup_hdr(ep, fp, f_ctl);
961 fh->fh_seq_id = rx_fh->fh_seq_id;
962 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
963 fh->fh_parm_offset = htonl(1); /* ack single frame */
964
965 fr_sof(fp) = fr_sof(rx_fp);
966 if (f_ctl & FC_FC_END_SEQ)
967 fr_eof(fp) = FC_EOF_T;
968 else
969 fr_eof(fp) = FC_EOF_N;
970
971 (void) lp->tt.frame_send(lp, fp);
972 }
973}
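/*
 * Worked example of the F_CTL handling in fc_seq_send_ack() above: a data
 * frame received from the exchange/sequence originator has EX_CTX (bit 23)
 * and SEQ_CTX (bit 22) clear; XORing those two bits sets them in the ACK,
 * marking it as sent by the exchange and sequence responder, while
 * FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN and SEQ_INIT are echoed unchanged.
 */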
974
975/*
976 * Send BLS Reject.
977 * This is for rejecting BA_ABTS only.
978 */
979static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
980 enum fc_ba_rjt_reason reason,
981 enum fc_ba_rjt_explan explan)
982{
983 struct fc_frame *fp;
984 struct fc_frame_header *rx_fh;
985 struct fc_frame_header *fh;
986 struct fc_ba_rjt *rp;
987 struct fc_lport *lp;
988 unsigned int f_ctl;
989
990 lp = fr_dev(rx_fp);
991 fp = fc_frame_alloc(lp, sizeof(*rp));
992 if (!fp)
993 return;
994 fh = fc_frame_header_get(fp);
995 rx_fh = fc_frame_header_get(rx_fp);
996
997 memset(fh, 0, sizeof(*fh) + sizeof(*rp));
998
999 rp = fc_frame_payload_get(fp, sizeof(*rp));
1000 rp->br_reason = reason;
1001 rp->br_explan = explan;
1002
1003 /*
1004 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1005 */
1006 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1007 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1008 fh->fh_ox_id = rx_fh->fh_rx_id;
1009 fh->fh_rx_id = rx_fh->fh_ox_id;
1010 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1011 fh->fh_r_ctl = FC_RCTL_BA_RJT;
1012 fh->fh_type = FC_TYPE_BLS;
1013
1014 /*
1015 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1016 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1017 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1018 * Last ACK uses bits 7-6 (continue sequence),
1019 * bits 5-4 are meaningful (what kind of ACK to use).
1020 * Always set LAST_SEQ, END_SEQ.
1021 */
1022 f_ctl = ntoh24(rx_fh->fh_f_ctl);
1023 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1024 FC_FC_END_CONN | FC_FC_SEQ_INIT |
1025 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1026 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1027 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1028 f_ctl &= ~FC_FC_FIRST_SEQ;
1029 hton24(fh->fh_f_ctl, f_ctl);
1030
1031 fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1032 fr_eof(fp) = FC_EOF_T;
1033 if (fc_sof_needs_ack(fr_sof(fp)))
1034 fr_eof(fp) = FC_EOF_N;
1035
1036 (void) lp->tt.frame_send(lp, fp);
1037}
1038
1039/*
1040 * Handle an incoming ABTS. This would be for target mode usually,
1041 * but could be due to lost FCP transfer ready, confirm or RRQ.
1042 * We always handle this as an exchange abort, ignoring the parameter.
1043 */
1044static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1045{
1046 struct fc_frame *fp;
1047 struct fc_ba_acc *ap;
1048 struct fc_frame_header *fh;
1049 struct fc_seq *sp;
1050
1051 if (!ep)
1052 goto reject;
1053 spin_lock_bh(&ep->ex_lock);
1054 if (ep->esb_stat & ESB_ST_COMPLETE) {
1055 spin_unlock_bh(&ep->ex_lock);
1056 goto reject;
1057 }
1058 if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1059 fc_exch_hold(ep); /* hold for REC_QUAL */
1060 ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1061 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1062
1063 fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1064 if (!fp) {
1065 spin_unlock_bh(&ep->ex_lock);
1066 goto free;
1067 }
1068 fh = fc_frame_header_get(fp);
1069 ap = fc_frame_payload_get(fp, sizeof(*ap));
1070 memset(ap, 0, sizeof(*ap));
1071 sp = &ep->seq;
1072 ap->ba_high_seq_cnt = htons(0xffff);
1073 if (sp->ssb_stat & SSB_ST_RESP) {
1074 ap->ba_seq_id = sp->id;
1075 ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1076 ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1077 ap->ba_low_seq_cnt = htons(sp->cnt);
1078 }
 1079 sp = fc_seq_start_next_locked(sp);
1080 spin_unlock_bh(&ep->ex_lock);
1081 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1082 fc_frame_free(rx_fp);
1083 return;
1084
1085reject:
1086 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1087free:
1088 fc_frame_free(rx_fp);
1089}
1090
1091/*
1092 * Handle receive where the other end is originating the sequence.
1093 */
1094static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1095 struct fc_frame *fp)
1096{
1097 struct fc_frame_header *fh = fc_frame_header_get(fp);
1098 struct fc_seq *sp = NULL;
1099 struct fc_exch *ep = NULL;
1100 enum fc_sof sof;
1101 enum fc_eof eof;
1102 u32 f_ctl;
1103 enum fc_pf_rjt_reason reject;
1104
1105 fr_seq(fp) = NULL;
1106 reject = fc_seq_lookup_recip(mp, fp);
1107 if (reject == FC_RJT_NONE) {
1108 sp = fr_seq(fp); /* sequence will be held */
1109 ep = fc_seq_exch(sp);
1110 sof = fr_sof(fp);
1111 eof = fr_eof(fp);
1112 f_ctl = ntoh24(fh->fh_f_ctl);
1113 fc_seq_send_ack(sp, fp);
1114
1115 /*
1116 * Call the receive function.
1117 *
1118 * The receive function may allocate a new sequence
1119 * over the old one, so we shouldn't change the
1120 * sequence after this.
1121 *
1122 * The frame will be freed by the receive function.
1123 * If new exch resp handler is valid then call that
1124 * first.
1125 */
1126 if (ep->resp)
1127 ep->resp(sp, fp, ep->arg);
1128 else
1129 lp->tt.lport_recv(lp, sp, fp);
1130 fc_exch_release(ep); /* release from lookup */
1131 } else {
 1132 FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject);
1133 fc_frame_free(fp);
1134 }
1135}
1136
1137/*
1138 * Handle receive where the other end is originating the sequence in
1139 * response to our exchange.
1140 */
1141static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1142{
1143 struct fc_frame_header *fh = fc_frame_header_get(fp);
1144 struct fc_seq *sp;
1145 struct fc_exch *ep;
1146 enum fc_sof sof;
1147 u32 f_ctl;
1148 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1149 void *ex_resp_arg;
1150 int rc;
1151
1152 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1153 if (!ep) {
1154 atomic_inc(&mp->stats.xid_not_found);
1155 goto out;
1156 }
1157 if (ep->esb_stat & ESB_ST_COMPLETE) {
1158 atomic_inc(&mp->stats.xid_not_found);
1159 goto out;
1160 }
1161 if (ep->rxid == FC_XID_UNKNOWN)
1162 ep->rxid = ntohs(fh->fh_rx_id);
1163 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1164 atomic_inc(&mp->stats.xid_not_found);
1165 goto rel;
1166 }
1167 if (ep->did != ntoh24(fh->fh_s_id) &&
1168 ep->did != FC_FID_FLOGI) {
1169 atomic_inc(&mp->stats.xid_not_found);
1170 goto rel;
1171 }
1172 sof = fr_sof(fp);
1173 if (fc_sof_is_init(sof)) {
1174 sp = fc_seq_start_next(&ep->seq);
1175 sp->id = fh->fh_seq_id;
1176 sp->ssb_stat |= SSB_ST_RESP;
1177 } else {
1178 sp = &ep->seq;
1179 if (sp->id != fh->fh_seq_id) {
1180 atomic_inc(&mp->stats.seq_not_found);
1181 goto rel;
1182 }
1183 }
1184 f_ctl = ntoh24(fh->fh_f_ctl);
1185 fr_seq(fp) = sp;
1186 if (f_ctl & FC_FC_SEQ_INIT)
1187 ep->esb_stat |= ESB_ST_SEQ_INIT;
1188
1189 if (fc_sof_needs_ack(sof))
1190 fc_seq_send_ack(sp, fp);
1191 resp = ep->resp;
1192 ex_resp_arg = ep->arg;
1193
1194 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1195 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1196 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1197 spin_lock_bh(&ep->ex_lock);
1198 rc = fc_exch_done_locked(ep);
1199 WARN_ON(fc_seq_exch(sp) != ep);
1200 spin_unlock_bh(&ep->ex_lock);
1201 if (!rc)
1202 fc_exch_mgr_delete_ep(ep);
1203 }
1204
1205 /*
1206 * Call the receive function.
1207 * The sequence is held (has a refcnt) for us,
1208 * but not for the receive function.
1209 *
1210 * The receive function may allocate a new sequence
1211 * over the old one, so we shouldn't change the
1212 * sequence after this.
1213 *
1214 * The frame will be freed by the receive function.
1215 * If new exch resp handler is valid then call that
1216 * first.
1217 */
1218 if (resp)
1219 resp(sp, fp, ex_resp_arg);
1220 else
1221 fc_frame_free(fp);
1222 fc_exch_release(ep);
1223 return;
1224rel:
1225 fc_exch_release(ep);
1226out:
1227 fc_frame_free(fp);
1228}
1229
1230/*
1231 * Handle receive for a sequence where other end is responding to our sequence.
1232 */
1233static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1234{
1235 struct fc_seq *sp;
1236
1237 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1238
1239 if (!sp)
 1240 atomic_inc(&mp->stats.xid_not_found);
 1241 else
 1242 atomic_inc(&mp->stats.non_bls_resp);
 1243
1244 fc_frame_free(fp);
1245}
1246
1247/*
1248 * Handle the response to an ABTS for exchange or sequence.
1249 * This can be BA_ACC or BA_RJT.
1250 */
1251static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1252{
1253 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1254 void *ex_resp_arg;
1255 struct fc_frame_header *fh;
1256 struct fc_ba_acc *ap;
1257 struct fc_seq *sp;
1258 u16 low;
1259 u16 high;
1260 int rc = 1, has_rec = 0;
1261
1262 fh = fc_frame_header_get(fp);
1263 FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
1264 fc_exch_rctl_name(fh->fh_r_ctl));
1265
1266 if (cancel_delayed_work_sync(&ep->timeout_work))
1267 fc_exch_release(ep); /* release from pending timer hold */
1268
1269 spin_lock_bh(&ep->ex_lock);
1270 switch (fh->fh_r_ctl) {
1271 case FC_RCTL_BA_ACC:
1272 ap = fc_frame_payload_get(fp, sizeof(*ap));
1273 if (!ap)
1274 break;
1275
1276 /*
1277 * Decide whether to establish a Recovery Qualifier.
1278 * We do this if there is a non-empty SEQ_CNT range and
1279 * SEQ_ID is the same as the one we aborted.
1280 */
1281 low = ntohs(ap->ba_low_seq_cnt);
1282 high = ntohs(ap->ba_high_seq_cnt);
1283 if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1284 (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1285 ap->ba_seq_id == ep->seq_id) && low != high) {
1286 ep->esb_stat |= ESB_ST_REC_QUAL;
1287 fc_exch_hold(ep); /* hold for recovery qualifier */
1288 has_rec = 1;
1289 }
1290 break;
1291 case FC_RCTL_BA_RJT:
1292 break;
1293 default:
1294 break;
1295 }
1296
1297 resp = ep->resp;
1298 ex_resp_arg = ep->arg;
1299
 1300 /* XXX: do we need to do some other checks here? Can we reuse more of
 1301 * fc_exch_recv_seq_resp()?
1302 */
1303 sp = &ep->seq;
1304 /*
1305 * do we want to check END_SEQ as well as LAST_SEQ here?
1306 */
1307 if (ep->fh_type != FC_TYPE_FCP &&
1308 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1309 rc = fc_exch_done_locked(ep);
1310 spin_unlock_bh(&ep->ex_lock);
1311 if (!rc)
1312 fc_exch_mgr_delete_ep(ep);
1313
1314 if (resp)
1315 resp(sp, fp, ex_resp_arg);
1316 else
1317 fc_frame_free(fp);
1318
1319 if (has_rec)
1320 fc_exch_timer_set(ep, ep->r_a_tov);
1321
1322}
1323
1324/*
1325 * Receive BLS sequence.
1326 * This is always a sequence initiated by the remote side.
1327 * We may be either the originator or recipient of the exchange.
1328 */
1329static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1330{
1331 struct fc_frame_header *fh;
1332 struct fc_exch *ep;
1333 u32 f_ctl;
1334
1335 fh = fc_frame_header_get(fp);
1336 f_ctl = ntoh24(fh->fh_f_ctl);
1337 fr_seq(fp) = NULL;
1338
1339 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1340 ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1341 if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1342 spin_lock_bh(&ep->ex_lock);
1343 ep->esb_stat |= ESB_ST_SEQ_INIT;
1344 spin_unlock_bh(&ep->ex_lock);
1345 }
1346 if (f_ctl & FC_FC_SEQ_CTX) {
1347 /*
1348 * A response to a sequence we initiated.
1349 * This should only be ACKs for class 2 or F.
1350 */
1351 switch (fh->fh_r_ctl) {
1352 case FC_RCTL_ACK_1:
1353 case FC_RCTL_ACK_0:
1354 break;
1355 default:
1356 FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
1357 fh->fh_r_ctl,
1358 fc_exch_rctl_name(fh->fh_r_ctl));
1359 break;
1360 }
1361 fc_frame_free(fp);
1362 } else {
1363 switch (fh->fh_r_ctl) {
1364 case FC_RCTL_BA_RJT:
1365 case FC_RCTL_BA_ACC:
1366 if (ep)
1367 fc_exch_abts_resp(ep, fp);
1368 else
1369 fc_frame_free(fp);
1370 break;
1371 case FC_RCTL_BA_ABTS:
1372 fc_exch_recv_abts(ep, fp);
1373 break;
1374 default: /* ignore junk */
1375 fc_frame_free(fp);
1376 break;
1377 }
1378 }
1379 if (ep)
1380 fc_exch_release(ep); /* release hold taken by fc_exch_find */
1381}
1382
1383/*
1384 * Accept sequence with LS_ACC.
1385 * If this fails due to allocation or transmit congestion, assume the
1386 * originator will repeat the sequence.
1387 */
1388static void fc_seq_ls_acc(struct fc_seq *req_sp)
1389{
1390 struct fc_seq *sp;
1391 struct fc_els_ls_acc *acc;
1392 struct fc_frame *fp;
1393
1394 sp = fc_seq_start_next(req_sp);
1395 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1396 if (fp) {
1397 acc = fc_frame_payload_get(fp, sizeof(*acc));
1398 memset(acc, 0, sizeof(*acc));
1399 acc->la_cmd = ELS_LS_ACC;
1400 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1401 }
1402}
1403
1404/*
1405 * Reject sequence with ELS LS_RJT.
1406 * If this fails due to allocation or transmit congestion, assume the
1407 * originator will repeat the sequence.
1408 */
1409static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
1410 enum fc_els_rjt_explan explan)
1411{
1412 struct fc_seq *sp;
1413 struct fc_els_ls_rjt *rjt;
1414 struct fc_frame *fp;
1415
1416 sp = fc_seq_start_next(req_sp);
1417 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
1418 if (fp) {
1419 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1420 memset(rjt, 0, sizeof(*rjt));
1421 rjt->er_cmd = ELS_LS_RJT;
1422 rjt->er_reason = reason;
1423 rjt->er_explan = explan;
1424 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1425 }
1426}
1427
1428static void fc_exch_reset(struct fc_exch *ep)
1429{
1430 struct fc_seq *sp;
1431 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1432 void *arg;
1433 int rc = 1;
1434
1435 spin_lock_bh(&ep->ex_lock);
1436 ep->state |= FC_EX_RST_CLEANUP;
1437 /*
1438 * we really want to call del_timer_sync, but cannot due
1439 * to the lport calling with the lport lock held (some resp
1440 * functions can also grab the lport lock which could cause
1441 * a deadlock).
1442 */
1443 if (cancel_delayed_work(&ep->timeout_work))
1444 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1445 resp = ep->resp;
1446 ep->resp = NULL;
1447 if (ep->esb_stat & ESB_ST_REC_QUAL)
1448 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1449 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1450 arg = ep->arg;
1451 sp = &ep->seq;
1452 rc = fc_exch_done_locked(ep);
1453 spin_unlock_bh(&ep->ex_lock);
1454 if (!rc)
1455 fc_exch_mgr_delete_ep(ep);
1456
1457 if (resp)
1458 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1459}
1460
1461/*
1462 * Reset an exchange manager, releasing all sequences and exchanges.
1463 * If sid is non-zero, reset only exchanges we source from that FID.
1464 * If did is non-zero, reset only exchanges destined to that FID.
1465 */
 1466void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
1467{
1468 struct fc_exch *ep;
1469 struct fc_exch *next;
 1470 struct fc_exch_mgr *mp = lp->emp;
1471
1472 spin_lock_bh(&mp->em_lock);
1473restart:
1474 list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
1475 if ((sid == 0 || sid == ep->sid) &&
1476 (did == 0 || did == ep->did)) {
1477 fc_exch_hold(ep);
1478 spin_unlock_bh(&mp->em_lock);
1479
1480 fc_exch_reset(ep);
1481
1482 fc_exch_release(ep);
1483 spin_lock_bh(&mp->em_lock);
1484
1485 /*
 1486 * must restart the loop in case multiple eps were
 1487 * released while the lock was dropped.
1488 */
1489 goto restart;
1490 }
1491 }
1492 spin_unlock_bh(&mp->em_lock);
1493}
1494EXPORT_SYMBOL(fc_exch_mgr_reset);
1495
1496/*
1497 * Handle incoming ELS REC - Read Exchange Concise.
1498 * Note that the requesting port may be different than the S_ID in the request.
1499 */
1500static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1501{
1502 struct fc_frame *fp;
1503 struct fc_exch *ep;
1504 struct fc_exch_mgr *em;
1505 struct fc_els_rec *rp;
1506 struct fc_els_rec_acc *acc;
1507 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1508 enum fc_els_rjt_explan explan;
1509 u32 sid;
1510 u16 rxid;
1511 u16 oxid;
1512
1513 rp = fc_frame_payload_get(rfp, sizeof(*rp));
1514 explan = ELS_EXPL_INV_LEN;
1515 if (!rp)
1516 goto reject;
1517 sid = ntoh24(rp->rec_s_id);
1518 rxid = ntohs(rp->rec_rx_id);
1519 oxid = ntohs(rp->rec_ox_id);
1520
1521 /*
1522 * Currently it's hard to find the local S_ID from the exchange
1523 * manager. This will eventually be fixed, but for now it's easier
 1524 * to look up the subject exchange twice, once as if we were
1525 * the initiator, and then again if we weren't.
1526 */
1527 em = fc_seq_exch(sp)->em;
1528 ep = fc_exch_find(em, oxid);
1529 explan = ELS_EXPL_OXID_RXID;
1530 if (ep && ep->oid == sid) {
1531 if (ep->rxid != FC_XID_UNKNOWN &&
1532 rxid != FC_XID_UNKNOWN &&
1533 ep->rxid != rxid)
1534 goto rel;
1535 } else {
1536 if (ep)
1537 fc_exch_release(ep);
1538 ep = NULL;
1539 if (rxid != FC_XID_UNKNOWN)
1540 ep = fc_exch_find(em, rxid);
1541 if (!ep)
1542 goto reject;
1543 }
1544
1545 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1546 if (!fp) {
1547 fc_exch_done(sp);
1548 goto out;
1549 }
1550 sp = fc_seq_start_next(sp);
1551 acc = fc_frame_payload_get(fp, sizeof(*acc));
1552 memset(acc, 0, sizeof(*acc));
1553 acc->reca_cmd = ELS_LS_ACC;
1554 acc->reca_ox_id = rp->rec_ox_id;
1555 memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1556 acc->reca_rx_id = htons(ep->rxid);
1557 if (ep->sid == ep->oid)
1558 hton24(acc->reca_rfid, ep->did);
1559 else
1560 hton24(acc->reca_rfid, ep->sid);
1561 acc->reca_fc4value = htonl(ep->seq.rec_data);
1562 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1563 ESB_ST_SEQ_INIT |
1564 ESB_ST_COMPLETE));
1565 sp = fc_seq_start_next(sp);
1566 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1567out:
1568 fc_exch_release(ep);
1569 fc_frame_free(rfp);
1570 return;
1571
1572rel:
1573 fc_exch_release(ep);
1574reject:
1575 fc_seq_ls_rjt(sp, reason, explan);
1576 fc_frame_free(rfp);
1577}
1578
1579/*
1580 * Handle response from RRQ.
1581 * Not much to do here, really.
1582 * Should report errors.
1583 *
1584 * TODO: fix error handler.
1585 */
1586static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1587{
1588 struct fc_exch *aborted_ep = arg;
1589 unsigned int op;
1590
1591 if (IS_ERR(fp)) {
1592 int err = PTR_ERR(fp);
1593
 1594 if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
 1595 goto cleanup;
1596 FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
1597 "frame error %d\n", err);
1598 return;
1599 }
1600
1601 op = fc_frame_payload_op(fp);
1602 fc_frame_free(fp);
1603
1604 switch (op) {
1605 case ELS_LS_RJT:
 1606 FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
1607 /* fall through */
1608 case ELS_LS_ACC:
1609 goto cleanup;
1610 default:
1611 FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
1612 "for RRQ", op);
1613 return;
1614 }
1615
1616cleanup:
1617 fc_exch_done(&aborted_ep->seq);
1618 /* drop hold for rec qual */
1619 fc_exch_release(aborted_ep);
1620}
1621
1622/*
1623 * Send ELS RRQ - Reinstate Recovery Qualifier.
1624 * This tells the remote port to stop blocking the use of
1625 * the exchange and the seq_cnt range.
1626 */
1627static void fc_exch_rrq(struct fc_exch *ep)
1628{
1629 struct fc_lport *lp;
1630 struct fc_els_rrq *rrq;
1631 struct fc_frame *fp;
1632 u32 did;
1633
1634 lp = ep->lp;
1635
1636 fp = fc_frame_alloc(lp, sizeof(*rrq));
1637 if (!fp)
1638 goto retry;
1639
1640 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1641 memset(rrq, 0, sizeof(*rrq));
1642 rrq->rrq_cmd = ELS_RRQ;
1643 hton24(rrq->rrq_s_id, ep->sid);
1644 rrq->rrq_ox_id = htons(ep->oxid);
1645 rrq->rrq_rx_id = htons(ep->rxid);
1646
1647 did = ep->did;
1648 if (ep->esb_stat & ESB_ST_RESP)
1649 did = ep->sid;
1650
1651 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
1652 fc_host_port_id(lp->host), FC_TYPE_ELS,
1653 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1654
1655 if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov))
1656 return;
1657
1658retry:
1659 spin_lock_bh(&ep->ex_lock);
1660 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
1661 spin_unlock_bh(&ep->ex_lock);
1662 /* drop hold for rec qual */
1663 fc_exch_release(ep);
1664 return;
1665 }
1666 ep->esb_stat |= ESB_ST_REC_QUAL;
1667 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1668 spin_unlock_bh(&ep->ex_lock);
1669}
1670
1671
1672/*
1673 * Handle incoming ELS RRQ - Reset Recovery Qualifier.
1674 */
1675static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1676{
1677 struct fc_exch *ep; /* request or subject exchange */
1678 struct fc_els_rrq *rp;
1679 u32 sid;
1680 u16 xid;
1681 enum fc_els_rjt_explan explan;
1682
1683 rp = fc_frame_payload_get(fp, sizeof(*rp));
1684 explan = ELS_EXPL_INV_LEN;
1685 if (!rp)
1686 goto reject;
1687
1688 /*
1689 * lookup subject exchange.
1690 */
1691 ep = fc_seq_exch(sp);
1692 sid = ntoh24(rp->rrq_s_id); /* subject source */
1693 xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
1694 ep = fc_exch_find(ep->em, xid);
1695
1696 explan = ELS_EXPL_OXID_RXID;
1697 if (!ep)
1698 goto reject;
1699 spin_lock_bh(&ep->ex_lock);
1700 if (ep->oxid != ntohs(rp->rrq_ox_id))
1701 goto unlock_reject;
1702 if (ep->rxid != ntohs(rp->rrq_rx_id) &&
1703 ep->rxid != FC_XID_UNKNOWN)
1704 goto unlock_reject;
1705 explan = ELS_EXPL_SID;
1706 if (ep->sid != sid)
1707 goto unlock_reject;
1708
1709 /*
1710 * Clear Recovery Qualifier state, and cancel timer if complete.
1711 */
1712 if (ep->esb_stat & ESB_ST_REC_QUAL) {
1713 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1714 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
1715 }
1716 if (ep->esb_stat & ESB_ST_COMPLETE) {
1717 if (cancel_delayed_work(&ep->timeout_work))
1718 atomic_dec(&ep->ex_refcnt); /* drop timer hold */
1719 }
1720
1721 spin_unlock_bh(&ep->ex_lock);
1722
1723 /*
1724 * Send LS_ACC.
1725 */
1726 fc_seq_ls_acc(sp);
1727 fc_frame_free(fp);
1728 return;
1729
1730unlock_reject:
1731 spin_unlock_bh(&ep->ex_lock);
1732 fc_exch_release(ep); /* drop hold from fc_exch_find */
1733reject:
1734 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
1735 fc_frame_free(fp);
1736}
1737
1738struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
1739 struct fc_exch_mgr *mp,
1740 bool (*match)(struct fc_frame *))
1741{
1742 struct fc_exch_mgr_anchor *ema;
1743
1744 ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
1745 if (!ema)
1746 return ema;
1747
1748 ema->mp = mp;
1749 ema->match = match;
1750 /* add EM anchor to EM anchors list */
1751 list_add_tail(&ema->ema_list, &lport->ema_list);
1752 kref_get(&mp->kref);
1753 return ema;
1754}
1755EXPORT_SYMBOL(fc_exch_mgr_add);
1756
1757static void fc_exch_mgr_destroy(struct kref *kref)
1758{
1759 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
1760
1761 /*
1762 * The total exch count must be zero
1763 * before freeing exchange manager.
1764 */
1765 WARN_ON(mp->total_exches != 0);
1766 mempool_destroy(mp->ep_pool);
1767 kfree(mp);
1768}
1769
1770void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
1771{
1772 /* remove EM anchor from EM anchors list */
1773 list_del(&ema->ema_list);
1774 kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
1775 kfree(ema);
1776}
1777EXPORT_SYMBOL(fc_exch_mgr_del);
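/*
 * Illustrative sketch (not part of this file): attaching an existing
 * exchange manager to an lport through an anchor. Passing a NULL match
 * function is assumed here to mean no special frame filtering.
 */
#if 0
	struct fc_exch_mgr_anchor *ema;

	ema = fc_exch_mgr_add(lport, mp, NULL);	/* takes a reference on mp */
	if (!ema)
		return -ENOMEM;
	/* ... later, e.g. on lport teardown ... */
	fc_exch_mgr_del(ema);			/* drops that reference */
#endif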
1778
1779struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1780 enum fc_class class,
1781 u16 min_xid, u16 max_xid)
1782{
1783 struct fc_exch_mgr *mp;
1784 size_t len;
1785
1786 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
7414705e
RL
1787 FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
1788 min_xid, max_xid);
42e9a92f
RL
1789 return NULL;
1790 }
1791
1792 /*
1793 * Memory need for EM
1794 */
1795#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
1796 len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
1797 len += sizeof(struct fc_exch_mgr);
1798
1799 mp = kzalloc(len, GFP_ATOMIC);
1800 if (!mp)
1801 return NULL;
1802
1803 mp->class = class;
1804 mp->total_exches = 0;
1805 mp->exches = (struct fc_exch **)(mp + 1);
1806 mp->lp = lp;
1807 /* adjust em exch xid range for offload */
1808 mp->min_xid = min_xid;
1809 mp->max_xid = max_xid;
1810 mp->last_xid = min_xid - 1;
1811 mp->max_read = 0;
1812 mp->last_read = 0;
1813 if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
1814 mp->max_read = lp->lro_xid;
1815 mp->last_read = min_xid - 1;
1816 mp->last_xid = mp->max_read;
1817 } else {
1818 /* disable lro if no xid control over read */
1819 lp->lro_enabled = 0;
1820 }
1821
1822 INIT_LIST_HEAD(&mp->ex_list);
1823 spin_lock_init(&mp->em_lock);
1824
1825 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
1826 if (!mp->ep_pool)
1827 goto free_mp;
1828
1829 return mp;
1830
1831free_mp:
1832 kfree(mp);
1833 return NULL;
1834}
1835EXPORT_SYMBOL(fc_exch_mgr_alloc);
1836
1837void fc_exch_mgr_free(struct fc_exch_mgr *mp)
1838{
1839 WARN_ON(!mp);
1840 /*
1841 * The total exch count must be zero
1842 * before freeing exchange manager.
1843 */
1844 WARN_ON(mp->total_exches != 0);
1845 mempool_destroy(mp->ep_pool);
1846 kfree(mp);
1847}
1848EXPORT_SYMBOL(fc_exch_mgr_free);
1849
1850struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
1851{
1852 if (!lp || !lp->emp)
1853 return NULL;
1854
1855 return fc_exch_alloc(lp->emp, fp, 0);
1856}
1857EXPORT_SYMBOL(fc_exch_get);
1858
1859struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1860 struct fc_frame *fp,
1861 void (*resp)(struct fc_seq *,
1862 struct fc_frame *fp,
1863 void *arg),
1864 void (*destructor)(struct fc_seq *, void *),
1865 void *arg, u32 timer_msec)
1866{
1867 struct fc_exch *ep;
1868 struct fc_seq *sp = NULL;
1869 struct fc_frame_header *fh;
1870 int rc = 1;
1871
1872 ep = lp->tt.exch_get(lp, fp);
1873 if (!ep) {
1874 fc_frame_free(fp);
1875 return NULL;
1876 }
1877 ep->esb_stat |= ESB_ST_SEQ_INIT;
1878 fh = fc_frame_header_get(fp);
1879 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1880 ep->resp = resp;
1881 ep->destructor = destructor;
1882 ep->arg = arg;
1883 ep->r_a_tov = FC_DEF_R_A_TOV;
1884 ep->lp = lp;
1885 sp = &ep->seq;
1886
 1887 ep->fh_type = fh->fh_type; /* save for possible timeout handling */
1888 ep->f_ctl = ntoh24(fh->fh_f_ctl);
1889 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1890 sp->cnt++;
1891
1892 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
1893
1894 if (unlikely(lp->tt.frame_send(lp, fp)))
1895 goto err;
1896
1897 if (timer_msec)
1898 fc_exch_timer_set_locked(ep, timer_msec);
1899 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1900
1901 if (ep->f_ctl & FC_FC_SEQ_INIT)
1902 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1903 spin_unlock_bh(&ep->ex_lock);
1904 return sp;
1905err:
1906 rc = fc_exch_done_locked(ep);
1907 spin_unlock_bh(&ep->ex_lock);
1908 if (!rc)
1909 fc_exch_mgr_delete_ep(ep);
1910 return NULL;
1911}
1912EXPORT_SYMBOL(fc_exch_seq_send);
1913
1914/*
1915 * Receive a frame
1916 */
1917void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1918 struct fc_frame *fp)
1919{
1920 struct fc_frame_header *fh = fc_frame_header_get(fp);
1921 u32 f_ctl;
1922
1923 /* lport lock ? */
 1924 if (!lp || !mp || lp->state == LPORT_ST_DISABLED) {
1925 FC_LPORT_DBG(lp, "Receiving frames for an lport that "
1926 "has not been initialized correctly\n");
1927 fc_frame_free(fp);
1928 return;
1929 }
1930
1931 /*
1932 * If frame is marked invalid, just drop it.
1933 */
1934 f_ctl = ntoh24(fh->fh_f_ctl);
1935 switch (fr_eof(fp)) {
1936 case FC_EOF_T:
1937 if (f_ctl & FC_FC_END_SEQ)
1938 skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
1939 /* fall through */
1940 case FC_EOF_N:
1941 if (fh->fh_type == FC_TYPE_BLS)
1942 fc_exch_recv_bls(mp, fp);
1943 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
1944 FC_FC_EX_CTX)
1945 fc_exch_recv_seq_resp(mp, fp);
1946 else if (f_ctl & FC_FC_SEQ_CTX)
1947 fc_exch_recv_resp(mp, fp);
1948 else
1949 fc_exch_recv_req(lp, mp, fp);
1950 break;
1951 default:
 1952 FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp));
1953 fc_frame_free(fp);
1954 break;
1955 }
1956}
1957EXPORT_SYMBOL(fc_exch_recv);
1958
1959int fc_exch_init(struct fc_lport *lp)
1960{
1961 if (!lp->tt.exch_get) {
1962 /*
1963 * exch_put() should be NULL if
1964 * exch_get() is NULL
1965 */
1966 WARN_ON(lp->tt.exch_put);
1967 lp->tt.exch_get = fc_exch_get;
1968 }
1969
1970 if (!lp->tt.seq_start_next)
1971 lp->tt.seq_start_next = fc_seq_start_next;
1972
1973 if (!lp->tt.exch_seq_send)
1974 lp->tt.exch_seq_send = fc_exch_seq_send;
1975
1976 if (!lp->tt.seq_send)
1977 lp->tt.seq_send = fc_seq_send;
1978
1979 if (!lp->tt.seq_els_rsp_send)
1980 lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
1981
1982 if (!lp->tt.exch_done)
1983 lp->tt.exch_done = fc_exch_done;
1984
1985 if (!lp->tt.exch_mgr_reset)
1986 lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
1987
1988 if (!lp->tt.seq_exch_abort)
1989 lp->tt.seq_exch_abort = fc_seq_exch_abort;
1990
1991 return 0;
1992}
1993EXPORT_SYMBOL(fc_exch_init);
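/*
 * Illustrative sketch (not part of this file): how an LLD might wire up the
 * exchange layer for an lport. The XID bounds here are hypothetical values.
 */
#if 0
	fc_exch_init(lp);	/* install default exchange/sequence handlers */
	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, 1, 0x0fff);
	if (!lp->emp)
		return -ENOMEM;
#endif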
1994
1995int fc_setup_exch_mgr(void)
1996{
1997 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
1998 0, SLAB_HWCACHE_ALIGN, NULL);
1999 if (!fc_em_cachep)
2000 return -ENOMEM;
2001 return 0;
2002}
2003
2004void fc_destroy_exch_mgr(void)
2005{
2006 kmem_cache_destroy(fc_em_cachep);
2007}