/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

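/*
 * Usage sketch (hedged: assumes this driver is built as the usual
 * iw_cxgb3 module): the knobs above are ordinary module parameters,
 * so they can be set at load time, e.g.
 *
 *   modprobe iw_cxgb3 mpa_rev=1 crc_enabled=1 markers_enabled=0 rcv_win=262144
 *
 * or adjusted afterwards via /sys/module/iw_cxgb3/parameters/.
 */
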
static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

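/*
 * CPL messages are dispatched in two stages: sched() runs in the cxgb3
 * receive path and only queues the skb on rxq, while process_work()
 * later invokes the matching work_handlers[] entry from the
 * single-threaded workqueue, where it is safe to sleep.
 */
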
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);

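/*
 * While the endpoint timer is armed it holds a reference on the ep:
 * start_ep_timer() takes one (unless it is merely restarting a pending
 * timer), and stop_ep_timer()/ep_timeout() drop it.
 */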
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! ep %p state %u\n",
		       __func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = l2t_send(tdev, skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = cxgb3_ofld_send(tdev, skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
}

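/*
 * Quiesce/resume work by rewriting the RX_QUIESCE bit in the
 * connection's TCB with a CPL_SET_TCB_FIELD work request: setting the
 * bit stops the HW from delivering further RX payload for this tid,
 * clearing it restarts delivery.
 */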
int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

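/*
 * The effective MSS is taken from the HW MTU table entry negotiated at
 * connect time, minus 40 bytes for the fixed IP + TCP headers, and a
 * further 12 bytes when TCP timestamps are in use.
 */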
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep *ep;
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			 }
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

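/*
 * Walk the HW MTU table (sorted ascending) and return the index of the
 * largest entry that does not exceed the given MTU; send_connect() and
 * accept_cr() program this index into opt0h as the MSS index.
 */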
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __func__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

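/*
 * Active open. opt0h carries the TCP personality of the offloaded
 * connection: Nagle off, keepalive on, window scale and MSS-table
 * index, plus the L2T index and TX channel for egress. opt0l holds the
 * TOS bits and the receive buffer size in 1KB units, and opt2 selects
 * the congestion control flavor.
 */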
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

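/*
 * MPA start negotiation rides as plain streaming data on the offloaded
 * TCP connection: each message below is wrapped in a TX_DATA work
 * request and handed to iwch_l2t_send(). The skb holding the MPA
 * message is kept referenced (ep->mpa_skb) until the HW acks the
 * transmit in tx_ack().
 */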
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __func__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

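/*
 * The *_upcall() helpers below translate wire/HW events into iw_cm
 * events for the ULP. Once a close, abort, or failed connect has been
 * delivered, the cm_id reference is dropped and the ep's cm_id/qp
 * pointers are cleared so no further events can be delivered.
 */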
static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

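/*
 * The HW buffers RX payload until the host returns credits; ack the
 * bytes just consumed with a CPL_RX_DATA_ACK so the peer's receive
 * window reopens.
 */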
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}

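/*
 * An MPA start message can arrive split across several CPL_RX_DATA
 * segments, so process_mpa_reply() and process_mpa_request() both
 * accumulate into ep->mpa_pkt and simply return until the full header
 * plus private data is present (or a protocol violation aborts the
 * connection).
 */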
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
		iwch_post_zb_read(ep->com.qp);
	}

	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		return CPL_RET_BUF_DONE;
	}

	BUG_ON(credits != 1);
	dst_confirm(ep->dst);
	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
			     __func__, ep, state_read(&ep->com));
			if (peer2peer)
				iwch_post_zb_read(ep->com.qp);
		} else {
			PDBG("%s responder ep %p state %u\n",
			     __func__, ep, state_read(&ep->com));
			ep->com.rpl_done = 1;
			wake_up(&ep->com.waitq);
		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
		     __func__, ep, state_read(&ep->com));
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/*
	 * We get 2 abort replies from the HW. The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}

static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = CPL_PRIORITY_SETUP;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = CPL_PRIORITY_SETUP;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));

	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type != T3A)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR "%s bad dst mac %pM\n",
		       __func__, req->dst_mac);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW. The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	if (state_read(&ep->com) != FPDU_MODE)
		return CPL_RET_BUF_DONE;

	PDBG("%s ep %p\n", __func__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __func__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int abort = 1;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p state %u\n",
		       __func__, ep, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (abort)
		abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}

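/*
 * The functions below are the provider entry points invoked by the
 * iw_cm core: reject/accept a connection request, initiate an active
 * connect, and create/destroy listening endpoints.
 */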
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	put_ep(&ep->com);
	return 0;
}

int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
			goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	put_ep(&ep->com);
	return err;
}

static int is_loopback_dst(struct iw_cm_id *cm_id)
{
	struct net_device *dev;

	dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
	if (!dev)
		return 0;
	dev_put(dev);
	return 1;
}

int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	if (is_loopback_dst(cm_id)) {
		err = -ENOSYS;
		goto out;
	}

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
out:
	return err;
}

int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}

int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct t3cdev *tdev;
	struct cxio_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	tdev = (struct t3cdev *)ep->com.tdev;
	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);

	return 0;
}

static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}