/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
static char *states[] = {
	"idle", "listen", "connecting", "mpa_req_wait", "mpa_req_sent",
	"mpa_req_rcvd", "mpa_rep_sent", "fpdu_mode", "aborting", "closing",
	"moribund", "dead", NULL,
};
int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
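/*
 * Example usage (assuming the module is loaded as iw_cxgb3): the knobs
 * above can be overridden at load time, e.g.
 *
 *   modprobe iw_cxgb3 mpa_rev=1 markers_enabled=0 crc_enabled=1
 *
 * and, because they are registered with mode 0644, tweaked afterwards
 * under /sys/module/iw_cxgb3/parameters/.
 */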
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! ep %p state %u\n",
		       __func__, ep, ep->com.state);
		WARN_ON(1);
	}
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}
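/*
 * Timer reference counting: start_ep_timer() takes a reference on the
 * endpoint unless it is merely restarting a pending timer, and
 * stop_ep_timer() (or ep_timeout() itself, on expiry) drops it, so an
 * endpoint cannot be freed while its timer is armed.
 */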
static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = l2t_send(tdev, skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}
int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = cxgb3_ofld_send(tdev, skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
}
int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}
static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}
static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}
static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void __free_ep(struct kref *kref)
{
	struct iwch_ep *ep;
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	}
	kfree(ep);
}
static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}
static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}
static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __func__, dev);
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(dev, skb);
}
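/*
 * The two ARP-failure policies above differ deliberately: a failed
 * active-open is just dropped and the connect is left to fail (see
 * CPL_ERR_ARP_MISS in status2errno()), while a failed abort is
 * downgraded to CPL_ABORT_NO_RST and sent anyway, because the hardware
 * TID still has to be torn down even if the peer is unreachable.
 */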
static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
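/*
 * A note on the window encodings used here and in the send_mpa_*()
 * functions below (inferred from the shifts): V_RCV_BUFSIZ() is fed the
 * receive window in 1KB units (rcv_win >> 10) and V_TX_SNDBUF() the send
 * buffer in 32KB units (snd_win >> 15), so the default rcv_win=256KB and
 * snd_win=32KB become field values 256 and 1 respectively.
 */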
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
}
static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}
static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __func__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}
static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}
static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
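	/*
	 * Note: ?: binds more loosely than |, so the expression below
	 * evaluates as ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0,
	 * i.e. CRC is negotiated on if *either* the peer requested it or
	 * the crc_enabled module parameter is set.  The passive side in
	 * process_mpa_request() uses the same expression.
	 */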
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
		iwch_post_zb_read(ep);
	}

	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
}
static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
}
static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}
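/*
 * Streaming (non-RDMA) data is only expected while the MPA exchange is
 * in flight; rx_data() accumulates it via the process_mpa_*() helpers
 * above and immediately refunds the byte count as RX credits so the
 * hardware receive window never closes during negotiation.
 */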
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	unsigned long flags;
	int post_zb = 0;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	BUG_ON(credits != 1);
	dst_confirm(ep->dst);
	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
		     __func__, ep, ep->com.state);
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
			     __func__, ep, ep->com.state);
			if (peer2peer && ep->com.state == FPDU_MODE)
				post_zb = 1;
		} else {
			PDBG("%s responder ep %p state %u\n",
			     __func__, ep, ep->com.state);
			if (ep->com.state == MPA_REQ_RCVD) {
				ep->com.rpl_done = 1;
				wake_up(&ep->com.waitq);
			}
		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
		     __func__, ep, ep->com.state);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (post_zb)
		iwch_post_zb_read(ep);
	return CPL_RET_BUF_DONE;
}
static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/*
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}
static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}
static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}
static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type != T3A)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}
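/*
 * Two reject paths above: most adapters hand the TID back with
 * CPL_TID_RELEASE, while T3A parts instead answer the request with an
 * explicit CPL_PASS_OPEN_REJECT accept-reply and let the hardware tear
 * the embryonic connection down.
 */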
static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR "%s bad dst mac %pM\n",
		       __func__, req->dst_mac);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}
static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
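/*
 * "Negative advice" aborts are advisory: the hardware is reporting a
 * retransmit or persist timer event, not a dead connection.  peer_abort()
 * therefore only nudges the L2T entry for such statuses instead of
 * tearing the endpoint down.
 */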
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumption by the
 * consumer.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	if (state_read(&ep->com) != FPDU_MODE)
		return CPL_RET_BUF_DONE;

	PDBG("%s ep %p\n", __func__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __func__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int abort = 1;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p state %u\n",
		       __func__, ep, ep->com.state);
		abort = 0;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (abort)
		abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}
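/*
 * The endpoint timer armed by start_ep_timer() bounds every externally
 * paced phase: MPA negotiation (MPA_REQ_SENT/MPA_REQ_WAIT) and the
 * orderly close handshake (CLOSING/MORIBUND).  On expiry the connection
 * is aborted and the final put_ep() drops the reference the timer held.
 */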
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	put_ep(&ep->com);
	return 0;
}
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
			goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	put_ep(&ep->com);
	return err;
}
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
	struct net_device *dev;

	dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
	if (!dev)
		return 0;
	dev_put(dev);
	return 1;
}
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	if (is_loopback_dst(cm_id)) {
		err = -ENOSYS;
		goto out;
	}

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
out:
	return err;
}
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct t3cdev *tdev;
	struct cxio_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	tdev = (struct t3cdev *)ep->com.tdev;
	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
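/*
 * Disconnect policy in short: "abrupt" maps to a RST via send_abort()
 * and the ABORTING state, an orderly close maps to send_halfclose()
 * plus the ep timer, and a fatal rdev error or send failure
 * short-circuits straight to release_ep_resources().
 */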
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(ep->dst);
	ep->dst = new;
	return 1;
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 * These are the real handlers that are called from the work queue.
 */
static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH]	= act_establish,
	[CPL_ACT_OPEN_RPL]	= act_open_rpl,
	[CPL_RX_DATA]		= rx_data,
	[CPL_TX_DMA_ACK]	= tx_ack,
	[CPL_ABORT_RPL_RSS]	= abort_rpl,
	[CPL_ABORT_RPL]		= abort_rpl,
	[CPL_PASS_OPEN_RPL]	= pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL]	= close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]	= pass_accept_req,
	[CPL_PASS_ESTABLISH]	= pass_establish,
	[CPL_PEER_CLOSE]	= peer_close,
	[CPL_ABORT_REQ_RSS]	= peer_abort,
	[CPL_CLOSE_CON_RPL]	= close_con_rpl,
	[CPL_RDMA_TERMINATE]	= terminate,
	[CPL_RDMA_EC_STATUS]	= ec_status,
};
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}
static DECLARE_WORK(skb_work, process_work);
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}
/*
 * All upcalls from the T3 Core go to sched() to schedule the
 * processing on a work queue.
 */
cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH]	= sched,
	[CPL_ACT_OPEN_RPL]	= sched,
	[CPL_RX_DATA]		= sched,
	[CPL_TX_DMA_ACK]	= sched,
	[CPL_ABORT_RPL_RSS]	= sched,
	[CPL_ABORT_RPL]		= sched,
	[CPL_PASS_OPEN_RPL]	= sched,
	[CPL_CLOSE_LISTSRV_RPL]	= sched,
	[CPL_PASS_ACCEPT_REQ]	= sched,
	[CPL_PASS_ESTABLISH]	= sched,
	[CPL_PEER_CLOSE]	= sched,
	[CPL_CLOSE_CON_RPL]	= sched,
	[CPL_ABORT_REQ_RSS]	= sched,
	[CPL_RDMA_TERMINATE]	= sched,
	[CPL_RDMA_EC_STATUS]	= sched,
	[CPL_SET_TCB_RPL]	= set_tcb_rpl,
};
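/*
 * CPL_SET_TCB_RPL is the one message handled inline by set_tcb_rpl()
 * rather than deferred through sched(): it only logs an unexpected
 * status and touches no endpoint state, so there is nothing to defer to
 * the work queue.
 */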
int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	return 0;
}
void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}