/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"
#include "clip_tbl.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");
static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
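/*
 * Note: all of the module parameters above use 0644 permissions, so
 * they are also exposed under /sys/module/iw_cxgb4/parameters/ and most
 * can be changed at runtime, e.g.:
 *
 *	echo 1 > /sys/module/iw_cxgb4/parameters/enable_ecn
 *
 * The connection-setup knobs (window sizes, MPA options, TCP options)
 * are sampled when a connection is established, so changes only affect
 * subsequently created connections.
 */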
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
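/*
 * Endpoint timer discipline: start_ep_timer() takes an extra reference
 * on the ep so it cannot be freed while the timer is armed, and
 * stop_ep_timer() drops that reference exactly once, guarded by the
 * TIMEOUT flag.  stop_ep_timer() returns 0 if it cancelled a pending
 * timer, and 1 if the timer had already fired - in which case
 * process_timeout() owns the reference and will abort the connection.
 */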
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
}
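/*
 * set_emss() derives the effective MSS for the connection from the
 * firmware MTU table entry negotiated in the TCP options: the path MTU
 * minus the IPv4/IPv6 and TCP header sizes, minus the (padded)
 * timestamp option when timestamps were negotiated.
 */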
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}
/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->stid_idr, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree_skb(ep->mpa_skb);
	kfree(ep);
}
static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint.  Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}
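/*
 * The find_route()/find_route6() helpers below resolve a routing entry
 * for the connection 4-tuple and then reject it unless the egress
 * device is one of this adapter's ports (or loopback) - offloaded
 * connections can only be terminated on the cxgb4 device itself.
 */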
static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}

static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err(MOD "ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

enum {
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}
/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err(MOD "ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	}
}
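/*
 * send_flowc() pushes a FW_FLOWC_WR to the firmware before any payload
 * is sent on the offloaded connection.  It seeds the firmware's
 * per-flow state: PF/VF, channel, port, ingress queue, initial send and
 * receive sequence numbers, send buffer size and MSS.  A ninth
 * parameter carrying the VLAN priority as a scheduling class is only
 * appended when the L2T entry has a VLAN tag; otherwise slot 8 is
 * zeroed purely to pad the WR out to a 16-byte boundary.
 */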
static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;
	u16 vlan = ep->l2t->vlan;
	int nparams;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 8;
	else
		nparams = 9;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	if (nparams == 9) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[8].val = cpu_to_be32(pri);
	} else {
		/* Pad WR to 16 byte boundary */
		flowc->mnemval[8].mnemonic = 0;
		flowc->mnemval[8].val = 0;
	}
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, ep, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
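/*
 * best_mtu() picks the adapter MTU table entry whose TCP payload is a
 * multiple of 8 bytes: it subtracts the IP/TCP header overhead (plus
 * the padded timestamp option when timestamps are in use) from the path
 * MTU and lets cxgb4_best_aligned_mtu() choose the matching index.
 */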
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}
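/*
 * send_connect() builds and fires the active-open CPL.  The exact CPL
 * layout depends on both the IP version and the adapter generation
 * (T4/T5/T6 each have their own cpl_*act_open_req{,6} structures), so
 * the function sizes the WR from CHELSIO_CHIP_VERSION() first, then
 * fills in the common opt0/opt2 TCB options: window scale, MSS index,
 * L2T index, DSCP derived from the TOS, and the RSS queue.  On T5 and
 * later the initial send sequence number is also supplied by the host.
 */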
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb = NULL;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid<<14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			t5req->params = cpu_to_be64(FILTER_TUPLE_V(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
			t5req->opt2 = cpu_to_be32(opt2);
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
			t5req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}
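/*
 * The three send_mpa_*() functions below follow the same pattern: the
 * MPA frame (start key, flags, revision, private data, and for MPA v2
 * an mpa_v2_conn_params block carrying IRD/ORD and the RTR bits) is
 * carried as immediate data inside a FW_OFLD_TX_DATA_WR, i.e. it is
 * sent in TCP streaming mode before the connection moves to FPDU mode.
 * The skb holding an MPA request/reply is kept referenced in
 * ep->mpa_skb until the hardware acks the transmit (see fw4_ack()).
 */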
static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
		     ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_ep_tid(ep);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep, NULL);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}
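/*
 * update_rx_credits() returns RX credits to the hardware via a
 * CPL_RX_DATA_ACK so the peer's TCP window reopens as the ULP consumes
 * data.  Because opt0's RCV_BUFSIZ field can only express part of a
 * large receive window at connection setup, any overage beyond
 * RCV_BUFSIZ_M * 1024 bytes is folded into the first credit update.
 */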
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
				       RX_DACK_CHANGE_F |
				       RX_DACK_MODE_V(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err_stop_timer;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
			     ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	__state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
	if (ep->parent_ep->com.state != DEAD) {
		if (connect_request_upcall(ep))
			goto err_unlock_parent;
	} else {
		goto err_unlock_parent;
	}
	mutex_unlock(&ep->parent_ep->com.mutex);
	return 0;

err_unlock_parent:
	mutex_unlock(&ep->parent_ep->com.mutex);
	goto err_out;
err_stop_timer:
	(void)stop_ep_timer(ep);
err_out:
	return 2;
}
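/*
 * rx_data() handles CPL_RX_DATA, i.e. TCP payload that arrives while
 * the connection is still in streaming mode.  Depending on the
 * endpoint state the bytes are fed to the MPA reply or request parser;
 * streaming data arriving in FPDU mode is a protocol violation and
 * moves the QP to TERMINATE.
 */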
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}
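/*
 * send_fw_act_open_req() is the fallback path used when a regular
 * active open fails with CPL_ERR_TCAM_FULL: instead of occupying a TCAM
 * filter entry, a FW_OFLD_CONNECTION_WR asks the firmware itself to
 * drive the TCP handshake for the 4-tuple, with the same opt0/opt2 TCB
 * options an ordinary active open would have used.
 */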
static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			      FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos >> 2) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/* Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}

#define ACT_OPEN_RETRY_COUNT 2
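/*
 * import_ep() binds an endpoint to the adapter resources implied by the
 * chosen route: it resolves the neighbour, allocates an L2T entry, and
 * derives the TX channel, SMAC index, TX/control/RSS queue indices and
 * MTU from the egress port.  Loopback destinations are special-cased by
 * looking up the real netdev that owns the peer address.
 */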
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t) {
			dev_put(pdev);
			goto out;
		}
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}

	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}
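/*
 * c4iw_reconnect() re-drives a full active open (new atid, fresh route
 * and L2T entry) on the same cm_id.  It is used transparently to the
 * ULP, e.g. to retry with MPA v1 after an MPA v2 negotiation failure,
 * so on error it must deliver the connect reply upcall itself.
 */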
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_remote_addr;
	int iptype;
	__u8 *ra;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);
	c4iw_init_wr_wait(&ep->com.wr_wait);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, ep->com.cm_id->tos);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
			ep->com.dev->rdev.lldi.adapter_type,
			ep->com.cm_id->tos);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = ep->com.cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * Remember to send a notification to the upper layer: it is not
	 * aware that this is a re-connect attempt and is still waiting
	 * for the response to its first connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
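/*
 * Handle CPL_ACT_OPEN_RPL: completion of an active open request.
 * Negative advice is counted and otherwise ignored; TCAM-full and
 * connection-exist failures are retried where possible; any other
 * failure is reported upstream and the endpoint is released.
 */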
2220 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2223 struct cpl_act_open_rpl *rpl = cplhdr(skb);
2224 unsigned int atid = TID_TID_G(AOPEN_ATID_G(
2225 ntohl(rpl->atid_status)));
2226 struct tid_info *t = dev->rdev.lldi.tids;
2227 int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
2228 struct sockaddr_in *la;
2229 struct sockaddr_in *ra;
2230 struct sockaddr_in6 *la6;
2231 struct sockaddr_in6 *ra6;
2234 ep = lookup_atid(t, atid);
2235 la = (struct sockaddr_in *)&ep->com.local_addr;
2236 ra = (struct sockaddr_in *)&ep->com.remote_addr;
2237 la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
2238 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
2240 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
2241 status, status2errno(status));
2243 if (is_neg_adv(status)) {
2244 PDBG("%s Connection problems for atid %u status %u (%s)\n",
2245 __func__, atid, status, neg_adv_str(status));
2246 ep->stats.connect_neg_adv++;
2247 mutex_lock(&dev->rdev.stats.lock);
2248 dev->rdev.stats.neg_adv++;
2249 mutex_unlock(&dev->rdev.stats.lock);
2253 set_bit(ACT_OPEN_RPL, &ep->com.history);
	/*
	 * Log interesting failures.
	 */
	switch (status) {
2259 case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
2262 case CPL_ERR_TCAM_FULL:
2263 mutex_lock(&dev->rdev.stats.lock);
2264 dev->rdev.stats.tcam_full++;
2265 mutex_unlock(&dev->rdev.stats.lock);
2266 if (ep->com.local_addr.ss_family == AF_INET &&
2267 dev->rdev.lldi.enable_fw_ofld_conn) {
2268 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
2269 ntohl(rpl->atid_status))));
2275 case CPL_ERR_CONN_EXIST:
2276 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2277 set_bit(ACT_RETRY_INUSE, &ep->com.history);
2278 if (ep->com.remote_addr.ss_family == AF_INET6) {
2279 struct sockaddr_in6 *sin6 =
2280 (struct sockaddr_in6 *)
2281 &ep->com.local_addr;
2283 ep->com.dev->rdev.lldi.ports[0],
2285 &sin6->sin6_addr.s6_addr, 1);
2287 remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
2289 cxgb4_free_atid(t, atid);
2290 dst_release(ep->dst);
2291 cxgb4_l2t_release(ep->l2t);
2297 if (ep->com.local_addr.ss_family == AF_INET) {
2298 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
2299 atid, status, status2errno(status),
2300 &la->sin_addr.s_addr, ntohs(la->sin_port),
2301 &ra->sin_addr.s_addr, ntohs(ra->sin_port));
2303 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
2304 atid, status, status2errno(status),
2305 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
2306 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
2312 connect_reply_upcall(ep, status2errno(status));
2313 state_set(&ep->com, DEAD);
2315 if (ep->com.remote_addr.ss_family == AF_INET6) {
2316 struct sockaddr_in6 *sin6 =
2317 (struct sockaddr_in6 *)&ep->com.local_addr;
2318 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
2319 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2321 if (status && act_open_has_tid(status))
2322 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
2324 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
2325 cxgb4_free_atid(t, atid);
2326 dst_release(ep->dst);
2327 cxgb4_l2t_release(ep->l2t);
2328 c4iw_put_ep(&ep->com);
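/*
 * Handle CPL_PASS_OPEN_RPL: completion of a listening-server create
 * request. Wake the waiter with the mapped errno.
 */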
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	c4iw_put_ep(&ep->com);
out:
	return 0;
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	c4iw_put_ep(&ep->com);
	return 0;
}
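/*
 * Build and send the CPL_PASS_ACCEPT_RPL that accepts an incoming
 * connection, encoding the MTU index, receive window, and negotiated
 * TCP options into opt0/opt2 for the hardware.
 */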
2363 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2364 struct cpl_pass_accept_req *req)
2366 struct cpl_pass_accept_rpl *rpl;
2367 unsigned int mtu_idx;
2371 struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
2373 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
2375 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2376 BUG_ON(skb_cloned(skb));
2380 if (!is_t4(adapter_type)) {
2381 skb_trim(skb, roundup(sizeof(*rpl5), 16));
2383 INIT_TP_WR(rpl5, ep->hwtid);
2385 skb_trim(skb, sizeof(*rpl));
2386 INIT_TP_WR(rpl, ep->hwtid);
2388 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2391 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2392 enable_tcp_timestamps && req->tcpopt.tstamp,
2393 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
2394 wscale = compute_wscale(rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
2400 win = ep->rcv_win >> 10;
2401 if (win > RCV_BUFSIZ_M)
2403 opt0 = (nocong ? NO_CONG_F : 0) |
2406 WND_SCALE_V(wscale) |
2407 MSS_IDX_V(mtu_idx) |
2408 L2T_IDX_V(ep->l2t->idx) |
2409 TX_CHAN_V(ep->tx_chan) |
2410 SMAC_SEL_V(ep->smac_idx) |
2411 DSCP_V(ep->tos >> 2) |
2412 ULP_MODE_V(ULP_MODE_TCPDDP) |
2414 opt2 = RX_CHANNEL_V(0) |
2415 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
2417 if (enable_tcp_timestamps && req->tcpopt.tstamp)
2418 opt2 |= TSTAMPS_EN_F;
2419 if (enable_tcp_sack && req->tcpopt.sack)
2421 if (wscale && enable_tcp_window_scaling)
2422 opt2 |= WND_SCALE_EN_F;
2424 const struct tcphdr *tcph;
2425 u32 hlen = ntohl(req->hdr_len);
2427 if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
2428 tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
2431 tcph = (const void *)(req + 1) +
2432 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
2433 if (tcph->ece && tcph->cwr)
2434 opt2 |= CCTRL_ECN_V(1);
2436 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
2437 u32 isn = (prandom_u32() & ~7UL) - 1;
2438 opt2 |= T5_OPT_2_VALID_F;
2439 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
2442 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2445 rpl5->iss = cpu_to_be32(isn);
2446 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
2449 rpl->opt0 = cpu_to_be64(opt0);
2450 rpl->opt2 = cpu_to_be32(opt2);
2451 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
2452 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
2454 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
}
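/*
 * Extract the connection 4-tuple from the SYN carried in a
 * CPL_PASS_ACCEPT_REQ, handling both IPv4 and IPv6 as well as the
 * differing T4/T5 and T6 header-length encodings.
 */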
static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
		       int *iptype, __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		      ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		      T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		     IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		     T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;
}
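/*
 * Handle CPL_PASS_ACCEPT_REQ: an incoming SYN matched one of our
 * listening endpoints. Allocate a child endpoint, resolve a route and
 * L2T entry for the peer, and send the accept reply; on failure the
 * hardware TID is released instead.
 */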
2502 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2504 struct c4iw_ep *child_ep = NULL, *parent_ep;
2505 struct cpl_pass_accept_req *req = cplhdr(skb);
2506 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
2507 struct tid_info *t = dev->rdev.lldi.tids;
2508 unsigned int hwtid = GET_TID(req);
2509 struct dst_entry *dst;
2510 __u8 local_ip[16], peer_ip[16];
2511 __be16 local_port, peer_port;
2512 struct sockaddr_in6 *sin6;
2514 u16 peer_mss = ntohs(req->tcpopt.mss);
2516 unsigned short hdrs;
2517 u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
2519 parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
2521 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
2525 if (state_read(&parent_ep->com) != LISTEN) {
2526 PDBG("%s - listening ep not in LISTEN\n", __func__);
2530 get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
2531 local_ip, peer_ip, &local_port, &peer_port);
2533 /* Find output route */
2535 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2536 , __func__, parent_ep, hwtid,
2537 local_ip, peer_ip, ntohs(local_port),
2538 ntohs(peer_port), peer_mss);
2539 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
2540 local_port, peer_port,
2543 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2544 , __func__, parent_ep, hwtid,
2545 local_ip, peer_ip, ntohs(local_port),
2546 ntohs(peer_port), peer_mss);
2547 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
2548 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
2549 ((struct sockaddr_in6 *)
2550 &parent_ep->com.local_addr)->sin6_scope_id);
2553 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
2558 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
2560 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
2566 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
2567 parent_ep->com.dev->rdev.lldi.adapter_type, tos);
2569 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
2576 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
2577 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2578 if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2579 child_ep->mtu = peer_mss + hdrs;
2581 state_set(&child_ep->com, CONNECTING);
2582 child_ep->com.dev = dev;
2583 child_ep->com.cm_id = NULL;
2586 struct sockaddr_in *sin = (struct sockaddr_in *)
2587 &child_ep->com.local_addr;
2589 sin->sin_family = PF_INET;
2590 sin->sin_port = local_port;
2591 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2593 sin = (struct sockaddr_in *)&child_ep->com.local_addr;
2594 sin->sin_family = PF_INET;
2595 sin->sin_port = ((struct sockaddr_in *)
2596 &parent_ep->com.local_addr)->sin_port;
2597 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2599 sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
2600 sin->sin_family = PF_INET;
2601 sin->sin_port = peer_port;
2602 sin->sin_addr.s_addr = *(__be32 *)peer_ip;
2604 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2605 sin6->sin6_family = PF_INET6;
2606 sin6->sin6_port = local_port;
2607 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2609 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2610 sin6->sin6_family = PF_INET6;
2611 sin6->sin6_port = ((struct sockaddr_in6 *)
2612 &parent_ep->com.local_addr)->sin6_port;
2613 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2615 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
2616 sin6->sin6_family = PF_INET6;
2617 sin6->sin6_port = peer_port;
2618 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2621 c4iw_get_ep(&parent_ep->com);
2622 child_ep->parent_ep = parent_ep;
2623 child_ep->tos = tos;
2624 child_ep->dst = dst;
2625 child_ep->hwtid = hwtid;
2627 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
2628 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
2630 init_timer(&child_ep->timer);
2631 cxgb4_insert_tid(t, child_ep, hwtid);
2632 insert_ep_tid(child_ep);
2633 if (accept_cr(child_ep, skb, req)) {
2634 c4iw_put_ep(&parent_ep->com);
2635 release_ep_resources(child_ep);
2637 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
2640 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2641 cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
2642 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2646 reject_cr(dev, hwtid, skb);
2648 c4iw_put_ep(&parent_ep->com);
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	int ret;

	ep = get_ep_from_tid(dev, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	mutex_lock(&ep->com.mutex);
	ep->com.state = MPA_REQ_WAIT;
	start_ep_timer(ep);
	set_bit(PASS_ESTAB, &ep->com.history);
	ret = send_flowc(ep, skb);
	mutex_unlock(&ep->com.mutex);
	if (ret)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);

	return 0;
}
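/*
 * Handle CPL_PEER_CLOSE: the peer has sent a FIN. Advance the endpoint
 * state machine (CLOSING/MORIBUND/DEAD) and move the QP to CLOSING or
 * IDLE as appropriate, issuing the matching upcalls.
 */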
2684 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2686 struct cpl_peer_close *hdr = cplhdr(skb);
2688 struct c4iw_qp_attributes attrs;
2691 unsigned int tid = GET_TID(hdr);
2694 ep = get_ep_from_tid(dev, tid);
2698 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2699 dst_confirm(ep->dst);
2701 set_bit(PEER_CLOSE, &ep->com.history);
2702 mutex_lock(&ep->com.mutex);
2703 switch (ep->com.state) {
2705 __state_set(&ep->com, CLOSING);
2708 __state_set(&ep->com, CLOSING);
2709 connect_reply_upcall(ep, -ECONNRESET);
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
2719 __state_set(&ep->com, CLOSING);
2720 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
2721 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2724 __state_set(&ep->com, CLOSING);
2725 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
2726 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2730 __state_set(&ep->com, CLOSING);
2731 attrs.next_state = C4IW_QP_STATE_CLOSING;
2732 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2733 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2734 if (ret != -ECONNRESET) {
2735 peer_close_upcall(ep);
2743 __state_set(&ep->com, MORIBUND);
2747 (void)stop_ep_timer(ep);
2748 if (ep->com.cm_id && ep->com.qp) {
2749 attrs.next_state = C4IW_QP_STATE_IDLE;
2750 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2751 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2753 close_complete_upcall(ep, 0);
2754 __state_set(&ep->com, DEAD);
2764 mutex_unlock(&ep->com.mutex);
2766 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2768 release_ep_resources(ep);
2769 c4iw_put_ep(&ep->com);
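/*
 * Handle CPL_ABORT_REQ_RSS: the connection was reset by the peer (or the
 * hardware sent negative advice). Move the QP to ERROR, notify the upper
 * layer, reply with CPL_ABORT_RPL (no RST), and release the endpoint
 * unless we are going to retry the connection with MPA v1.
 */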
2773 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2775 struct cpl_abort_req_rss *req = cplhdr(skb);
2777 struct cpl_abort_rpl *rpl;
2778 struct sk_buff *rpl_skb;
2779 struct c4iw_qp_attributes attrs;
2782 unsigned int tid = GET_TID(req);
2784 ep = get_ep_from_tid(dev, tid);
2788 if (is_neg_adv(req->status)) {
2789 PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
2790 __func__, ep->hwtid, req->status,
2791 neg_adv_str(req->status));
2792 ep->stats.abort_neg_adv++;
2793 mutex_lock(&dev->rdev.stats.lock);
2794 dev->rdev.stats.neg_adv++;
2795 mutex_unlock(&dev->rdev.stats.lock);
2798 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2800 set_bit(PEER_ABORT, &ep->com.history);
	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT.
	 */
	if (ep->com.state != MPA_REQ_SENT)
2808 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2810 mutex_lock(&ep->com.mutex);
2811 switch (ep->com.state) {
2813 c4iw_put_ep(&ep->parent_ep->com);
2816 (void)stop_ep_timer(ep);
2819 (void)stop_ep_timer(ep);
2820 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
2821 connect_reply_upcall(ep, -ECONNRESET);
		/*
		 * we just don't send notification upwards because we
		 * want to retry with mpa_v1 without upper layers even
		 * knowing it.
		 *
		 * do some housekeeping so as to re-initiate the
		 * connection
		 */
		PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
		     mpa_rev);
		ep->retry_with_mpa_v1 = 1;
2845 if (ep->com.cm_id && ep->com.qp) {
2846 attrs.next_state = C4IW_QP_STATE_ERROR;
2847 ret = c4iw_modify_qp(ep->com.qp->rhp,
2848 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2852 "%s - qp <- error failed!\n",
2855 peer_abort_upcall(ep);
2860 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
2861 mutex_unlock(&ep->com.mutex);
2867 dst_confirm(ep->dst);
2868 if (ep->com.state != ABORTING) {
2869 __state_set(&ep->com, DEAD);
2870 /* we don't release if we want to retry with mpa_v1 */
2871 if (!ep->retry_with_mpa_v1)
2874 mutex_unlock(&ep->com.mutex);
2876 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
2878 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
2883 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
2884 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
2885 INIT_TP_WR(rpl, ep->hwtid);
2886 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
2887 rpl->cmd = CPL_ABORT_NO_RST;
2888 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2891 release_ep_resources(ep);
2892 else if (ep->retry_with_mpa_v1) {
2893 if (ep->com.remote_addr.ss_family == AF_INET6) {
2894 struct sockaddr_in6 *sin6 =
2895 (struct sockaddr_in6 *)
2896 &ep->com.local_addr;
2898 ep->com.dev->rdev.lldi.ports[0],
2899 (const u32 *)&sin6->sin6_addr.s6_addr,
2902 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
2903 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
2904 dst_release(ep->dst);
2905 cxgb4_l2t_release(ep->l2t);
2910 c4iw_put_ep(&ep->com);
2911 /* Dereferencing ep, referenced in peer_abort_intr() */
2912 c4iw_put_ep(&ep->com);
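/*
 * Handle CPL_CLOSE_CON_RPL: our half-close has completed. Idle the QP
 * if the endpoint was MORIBUND, mark it DEAD, and release its resources.
 */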
2916 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2919 struct c4iw_qp_attributes attrs;
2920 struct cpl_close_con_rpl *rpl = cplhdr(skb);
2922 unsigned int tid = GET_TID(rpl);
2924 ep = get_ep_from_tid(dev, tid);
2928 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2931 /* The cm_id may be null if we failed to connect */
2932 mutex_lock(&ep->com.mutex);
2933 set_bit(CLOSE_CON_RPL, &ep->com.history);
2934 switch (ep->com.state) {
2936 __state_set(&ep->com, MORIBUND);
2939 (void)stop_ep_timer(ep);
2940 if ((ep->com.cm_id) && (ep->com.qp)) {
2941 attrs.next_state = C4IW_QP_STATE_IDLE;
2942 c4iw_modify_qp(ep->com.qp->rhp,
2944 C4IW_QP_ATTR_NEXT_STATE,
2947 close_complete_upcall(ep, 0);
2948 __state_set(&ep->com, DEAD);
2958 mutex_unlock(&ep->com.mutex);
2960 release_ep_resources(ep);
2961 c4iw_put_ep(&ep->com);
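/*
 * Handle CPL_RDMA_TERMINATE: move the associated QP into the TERMINATE
 * state so the peer's TERM message is processed.
 */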
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = get_ep_from_tid(dev, tid);
	BUG_ON(!ep);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
	c4iw_put_ep(&ep->com);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		goto out;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
		mutex_lock(&ep->com.mutex);
		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
			stop_ep_timer(ep);
		mutex_unlock(&ep->com.mutex);
	}
out:
	c4iw_put_ep(&ep->com);
	return 0;
}
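/*
 * Called by the IWCM when the ULP rejects a connection request: send an
 * MPA reject with the supplied private data, disconnect if required, and
 * drop our reference.
 */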
3028 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
3032 struct c4iw_ep *ep = to_ep(cm_id);
3033 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
3035 mutex_lock(&ep->com.mutex);
3036 if (ep->com.state != MPA_REQ_RCVD) {
3037 mutex_unlock(&ep->com.mutex);
3038 c4iw_put_ep(&ep->com);
3041 set_bit(ULP_REJECT, &ep->com.history);
3045 err = send_mpa_reject(ep, pdata, pdata_len);
3048 mutex_unlock(&ep->com.mutex);
3051 err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
3053 c4iw_put_ep(&ep->com);
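/*
 * Called by the IWCM when the ULP accepts a connection request:
 * negotiate IRD/ORD, bind the QP to the endpoint, move it to RTS, and
 * send the MPA reply.
 */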
3057 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3060 struct c4iw_qp_attributes attrs;
3061 enum c4iw_qp_attr_mask mask;
3062 struct c4iw_ep *ep = to_ep(cm_id);
3063 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
3064 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
3067 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
3069 mutex_lock(&ep->com.mutex);
3070 if (ep->com.state != MPA_REQ_RCVD) {
3077 set_bit(ULP_ACCEPT, &ep->com.history);
3078 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
3079 (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
3084 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
3085 if (conn_param->ord > ep->ird) {
3086 if (RELAXED_IRD_NEGOTIATION) {
3089 ep->ird = conn_param->ird;
3090 ep->ord = conn_param->ord;
3091 send_mpa_reject(ep, conn_param->private_data,
3092 conn_param->private_data_len);
3097 if (conn_param->ird < ep->ord) {
3098 if (RELAXED_IRD_NEGOTIATION &&
3099 ep->ord <= h->rdev.lldi.max_ordird_qp) {
3100 conn_param->ird = ep->ord;
3107 ep->ird = conn_param->ird;
3108 ep->ord = conn_param->ord;
3110 if (ep->mpa_attr.version == 1) {
3111 if (peer2peer && ep->ird == 0)
3115 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
3116 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
3120 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
3122 ep->com.cm_id = cm_id;
3123 ref_cm_id(&ep->com);
3127 /* bind QP to EP and move to RTS */
3128 attrs.mpa_attr = ep->mpa_attr;
3129 attrs.max_ird = ep->ird;
3130 attrs.max_ord = ep->ord;
3131 attrs.llp_stream_handle = ep;
3132 attrs.next_state = C4IW_QP_STATE_RTS;
3134 /* bind QP and TID with INIT_WR */
3135 mask = C4IW_QP_ATTR_NEXT_STATE |
3136 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
3137 C4IW_QP_ATTR_MPA_ATTR |
3138 C4IW_QP_ATTR_MAX_IRD |
3139 C4IW_QP_ATTR_MAX_ORD;
3141 err = c4iw_modify_qp(ep->com.qp->rhp,
3142 ep->com.qp, mask, &attrs, 1);
3144 goto err_deref_cm_id;
3146 set_bit(STOP_MPA_TIMER, &ep->com.flags);
3147 err = send_mpa_reply(ep, conn_param->private_data,
3148 conn_param->private_data_len);
3150 goto err_deref_cm_id;
3152 __state_set(&ep->com, FPDU_MODE);
3153 established_upcall(ep);
3154 mutex_unlock(&ep->com.mutex);
3155 c4iw_put_ep(&ep->com);
3158 deref_cm_id(&ep->com);
3162 mutex_unlock(&ep->com.mutex);
3164 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3165 c4iw_put_ep(&ep->com);
3169 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3171 struct in_device *ind;
3173 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3174 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
3176 ind = in_dev_get(dev->rdev.lldi.ports[0]);
3178 return -EADDRNOTAVAIL;
3179 for_primary_ifa(ind) {
3180 laddr->sin_addr.s_addr = ifa->ifa_address;
3181 raddr->sin_addr.s_addr = ifa->ifa_address;
3187 return found ? 0 : -EADDRNOTAVAIL;
3190 static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
3191 unsigned char banned_flags)
3193 struct inet6_dev *idev;
3194 int err = -EADDRNOTAVAIL;
3197 idev = __in6_dev_get(dev);
3199 struct inet6_ifaddr *ifp;
3201 read_lock_bh(&idev->lock);
3202 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3203 if (ifp->scope == IFA_LINK &&
3204 !(ifp->flags & banned_flags)) {
3205 memcpy(addr, &ifp->addr, 16);
3210 read_unlock_bh(&idev->lock);
3216 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3218 struct in6_addr uninitialized_var(addr);
3219 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3220 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
3222 if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
3223 memcpy(la6->sin6_addr.s6_addr, &addr, 16);
3224 memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
3227 return -EADDRNOTAVAIL;
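/*
 * Initiate an active connection for the IWCM: allocate an endpoint and
 * an active TID, resolve a route to the peer (handling loopback requests
 * to INADDR_ANY), and send the connect request to the adapter.
 */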
3230 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3232 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3235 struct sockaddr_in *laddr;
3236 struct sockaddr_in *raddr;
3237 struct sockaddr_in6 *laddr6;
3238 struct sockaddr_in6 *raddr6;
3242 if ((conn_param->ord > cur_max_read_depth(dev)) ||
3243 (conn_param->ird > cur_max_read_depth(dev))) {
3247 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3249 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
3253 init_timer(&ep->timer);
3254 ep->plen = conn_param->private_data_len;
3256 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
3257 conn_param->private_data, ep->plen);
3258 ep->ird = conn_param->ird;
3259 ep->ord = conn_param->ord;
3261 if (peer2peer && ep->ord == 0)
3264 ep->com.cm_id = cm_id;
3265 ref_cm_id(&ep->com);
3267 ep->com.qp = get_qhp(dev, conn_param->qpn);
3269 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
3274 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
3280 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
3281 if (ep->atid == -1) {
3282 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
3286 insert_handle(dev, &dev->atid_idr, ep, ep->atid);
3288 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3289 sizeof(ep->com.local_addr));
3290 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
3291 sizeof(ep->com.remote_addr));
3293 laddr = (struct sockaddr_in *)&ep->com.local_addr;
3294 raddr = (struct sockaddr_in *)&ep->com.remote_addr;
3295 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3296 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
3298 if (cm_id->m_remote_addr.ss_family == AF_INET) {
3300 ra = (__u8 *)&raddr->sin_addr;
		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
3306 err = pick_local_ipaddrs(dev, cm_id);
3312 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
3313 __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
3314 ra, ntohs(raddr->sin_port));
3315 ep->dst = find_route(dev, laddr->sin_addr.s_addr,
3316 raddr->sin_addr.s_addr, laddr->sin_port,
3317 raddr->sin_port, cm_id->tos);
3320 ra = (__u8 *)&raddr6->sin6_addr;
		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
3326 err = pick_local_ip6addrs(dev, cm_id);
3332 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
3333 __func__, laddr6->sin6_addr.s6_addr,
3334 ntohs(laddr6->sin6_port),
3335 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
3336 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
3337 raddr6->sin6_addr.s6_addr,
3338 laddr6->sin6_port, raddr6->sin6_port, 0,
3339 raddr6->sin6_scope_id);
3342 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
3343 err = -EHOSTUNREACH;
3347 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
3348 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
3350 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
3354 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
3355 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
3358 state_set(&ep->com, CONNECTING);
3359 ep->tos = cm_id->tos;
3361 /* send connect request to rnic */
3362 err = send_connect(ep);
3366 cxgb4_l2t_release(ep->l2t);
3368 dst_release(ep->dst);
3370 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
3371 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
3373 deref_cm_id(&ep->com);
3374 c4iw_put_ep(&ep->com);
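/*
 * Program an IPv6 listening server into the adapter, taking a CLIP
 * entry for the local address first when one is needed.
 */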
3379 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3382 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
3383 &ep->com.local_addr;
3385 if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
3386 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
3387 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3391 c4iw_init_wr_wait(&ep->com.wr_wait);
3392 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3393 ep->stid, &sin6->sin6_addr,
3395 ep->com.dev->rdev.lldi.rxq_ids[0]);
3397 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3401 err = net_xmit_errno(err);
3403 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3404 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3405 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
3407 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
3412 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3415 struct sockaddr_in *sin = (struct sockaddr_in *)
3416 &ep->com.local_addr;
3418 if (dev->rdev.lldi.enable_fw_ofld_conn) {
3420 err = cxgb4_create_server_filter(
3421 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3422 sin->sin_addr.s_addr, sin->sin_port, 0,
3423 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3424 if (err == -EBUSY) {
3425 if (c4iw_fatal_error(&ep->com.dev->rdev)) {
3429 set_current_state(TASK_UNINTERRUPTIBLE);
3430 schedule_timeout(usecs_to_jiffies(100));
3432 } while (err == -EBUSY);
3434 c4iw_init_wr_wait(&ep->com.wr_wait);
3435 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3436 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3437 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3439 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3443 err = net_xmit_errno(err);
3446 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
3448 &sin->sin_addr, ntohs(sin->sin_port));
3452 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3455 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3456 struct c4iw_listen_ep *ep;
3460 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3462 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
3466 PDBG("%s ep %p\n", __func__, ep);
3467 ep->com.cm_id = cm_id;
3468 ref_cm_id(&ep->com);
3470 ep->backlog = backlog;
3471 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3472 sizeof(ep->com.local_addr));
	/*
	 * Allocate a server TID.
	 */
3477 if (dev->rdev.lldi.enable_fw_ofld_conn &&
3478 ep->com.local_addr.ss_family == AF_INET)
3479 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
3480 cm_id->m_local_addr.ss_family, ep);
3482 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
3483 cm_id->m_local_addr.ss_family, ep);
3485 if (ep->stid == -1) {
3486 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
3490 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
3492 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3493 sizeof(ep->com.local_addr));
3495 state_set(&ep->com, LISTEN);
3496 if (ep->com.local_addr.ss_family == AF_INET)
3497 err = create_server4(dev, ep);
3499 err = create_server6(dev, ep);
3501 cm_id->provider_data = ep;
3505 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3506 ep->com.local_addr.ss_family);
3508 deref_cm_id(&ep->com);
3509 c4iw_put_ep(&ep->com);
3515 int c4iw_destroy_listen(struct iw_cm_id *cm_id)
3518 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3520 PDBG("%s ep %p\n", __func__, ep);
3523 state_set(&ep->com, DEAD);
3524 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3525 ep->com.local_addr.ss_family == AF_INET) {
3526 err = cxgb4_remove_server_filter(
3527 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3528 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3530 struct sockaddr_in6 *sin6;
3531 c4iw_init_wr_wait(&ep->com.wr_wait);
3532 err = cxgb4_remove_server(
3533 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3534 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3537 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
3539 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3540 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3541 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3543 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
3544 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3545 ep->com.local_addr.ss_family);
3547 deref_cm_id(&ep->com);
3548 c4iw_put_ep(&ep->com);
3552 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3557 struct c4iw_rdev *rdev;
3559 mutex_lock(&ep->com.mutex);
3561 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
3562 states[ep->com.state], abrupt);
	/*
	 * Ref the ep here in case we have fatal errors causing the
	 * ep to be released and freed.
	 */
3568 c4iw_get_ep(&ep->com);
3570 rdev = &ep->com.dev->rdev;
3571 if (c4iw_fatal_error(rdev)) {
3573 close_complete_upcall(ep, -EIO);
3574 ep->com.state = DEAD;
3576 switch (ep->com.state) {
3584 ep->com.state = ABORTING;
3586 ep->com.state = CLOSING;
3589 set_bit(CLOSE_SENT, &ep->com.flags);
3592 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3595 (void)stop_ep_timer(ep);
3596 ep->com.state = ABORTING;
3598 ep->com.state = MORIBUND;
3604 PDBG("%s ignoring disconnect ep %p state %u\n",
3605 __func__, ep, ep->com.state);
3614 set_bit(EP_DISC_ABORT, &ep->com.history);
3615 close_complete_upcall(ep, -ECONNRESET);
3616 ret = send_abort(ep, NULL, gfp);
3618 set_bit(EP_DISC_CLOSE, &ep->com.history);
3619 ret = send_halfclose(ep, gfp);
3622 set_bit(EP_DISC_FAIL, &ep->com.history);
3625 close_complete_upcall(ep, -EIO);
3628 struct c4iw_qp_attributes attrs;
3630 attrs.next_state = C4IW_QP_STATE_ERROR;
3631 ret = c4iw_modify_qp(ep->com.qp->rhp,
3633 C4IW_QP_ATTR_NEXT_STATE,
3637 "%s - qp <- error failed!\n",
3643 mutex_unlock(&ep->com.mutex);
3644 c4iw_put_ep(&ep->com);
3646 release_ep_resources(ep);
3650 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3651 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3654 int atid = be32_to_cpu(req->tid);
3656 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3657 (__force u32) req->tid);
3661 switch (req->retval) {
3663 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3664 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3665 send_fw_act_open_req(ep, atid);
3669 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3670 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3671 send_fw_act_open_req(ep, atid);
3676 pr_info("%s unexpected ofld conn wr retval %d\n",
3677 __func__, req->retval);
3680 pr_err("active ofld_connect_wr failure %d atid %d\n",
3682 mutex_lock(&dev->rdev.stats.lock);
3683 dev->rdev.stats.act_ofld_conn_fails++;
3684 mutex_unlock(&dev->rdev.stats.lock);
3685 connect_reply_upcall(ep, status2errno(req->retval));
3686 state_set(&ep->com, DEAD);
3687 if (ep->com.remote_addr.ss_family == AF_INET6) {
3688 struct sockaddr_in6 *sin6 =
3689 (struct sockaddr_in6 *)&ep->com.local_addr;
3690 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3691 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3693 remove_handle(dev, &dev->atid_idr, atid);
3694 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3695 dst_release(ep->dst);
3696 cxgb4_l2t_release(ep->l2t);
3697 c4iw_put_ep(&ep->com);
3700 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3701 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3703 struct sk_buff *rpl_skb;
3704 struct cpl_pass_accept_req *cpl;
3707 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
3710 PDBG("%s passive open failure %d\n", __func__, req->retval);
3711 mutex_lock(&dev->rdev.stats.lock);
3712 dev->rdev.stats.pas_ofld_conn_fails++;
3713 mutex_unlock(&dev->rdev.stats.lock);
3716 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3717 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
3718 (__force u32) htonl(
3719 (__force u32) req->tid)));
3720 ret = pass_accept_req(dev, rpl_skb);
3727 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3729 struct cpl_fw6_msg *rpl = cplhdr(skb);
3730 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3732 switch (rpl->type) {
3734 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3736 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3737 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3738 switch (req->t_state) {
3740 active_ofld_conn_reply(dev, skb, req);
3743 passive_ofld_conn_reply(dev, skb, req);
3746 pr_err("%s unexpected ofld conn wr state %d\n",
3747 __func__, req->t_state);
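/*
 * Synthesize a cpl_pass_accept_req in place from the raw SYN held in a
 * cpl_rx_pkt, so a filter-redirected SYN can be fed through the normal
 * passive-open path.
 */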
3755 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
3758 __be16 hdr_len, vlantag, len;
3760 int tcp_hdr_len, ip_hdr_len;
3762 struct cpl_rx_pkt *cpl = cplhdr(skb);
3763 struct cpl_pass_accept_req *req;
3764 struct tcp_options_received tmp_opt;
3765 struct c4iw_dev *dev;
3766 enum chip_type type;
3768 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3769 /* Store values from cpl_rx_pkt in temporary location. */
3770 vlantag = cpl->vlan;
3772 l2info = cpl->l2info;
3773 hdr_len = cpl->hdr_len;
3776 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate cpl_pass_accept_req.
	 */
3782 memset(&tmp_opt, 0, sizeof(tmp_opt));
3783 tcp_clear_options(&tmp_opt);
3784 tcp_parse_options(skb, &tmp_opt, 0, NULL);
3786 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
3787 memset(req, 0, sizeof(*req));
3788 req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
3789 SYN_MAC_IDX_V(RX_MACIDX_G(
3790 be32_to_cpu(l2info))) |
3792 type = dev->rdev.lldi.adapter_type;
3793 tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
3794 ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
3796 cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
3797 if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
3798 eth_hdr_len = is_t4(type) ?
3799 RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
3800 RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
3801 req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
3802 IP_HDR_LEN_V(ip_hdr_len) |
3803 ETH_HDR_LEN_V(eth_hdr_len));
3804 } else { /* T6 and later */
3805 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
3806 req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
3807 T6_IP_HDR_LEN_V(ip_hdr_len) |
3808 T6_ETH_HDR_LEN_V(eth_hdr_len));
3810 req->vlan = vlantag;
3812 req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
3813 PASS_OPEN_TOS_V(tos));
3814 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3815 if (tmp_opt.wscale_ok)
3816 req->tcpopt.wsf = tmp_opt.snd_wscale;
3817 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3818 if (tmp_opt.sack_ok)
3819 req->tcpopt.sack = 1;
3820 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
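/*
 * Issue a FW_OFLD_CONNECTION_WR so the firmware allocates a TID for the
 * filter-redirected passive connection; the reply comes back on the
 * queue stored in opt2.
 */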
3824 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3825 __be32 laddr, __be16 lport,
3826 __be32 raddr, __be16 rport,
3827 u32 rcv_isn, u32 filter, u16 window,
3828 u32 rss_qid, u8 port_id)
3830 struct sk_buff *req_skb;
3831 struct fw_ofld_connection_wr *req;
3832 struct cpl_pass_accept_req *cpl = cplhdr(skb);
3835 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
3836 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
3837 memset(req, 0, sizeof(*req));
3838 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
3839 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
3840 req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
3841 req->le.filter = (__force __be32) filter;
3842 req->le.lport = lport;
3843 req->le.pport = rport;
3844 req->le.u.ipv4.lip = laddr;
3845 req->le.u.ipv4.pip = raddr;
3846 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
3847 req->tcb.rcv_adv = htons(window);
3848 req->tcb.t_state_to_astid =
3849 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
3850 FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
3851 FW_OFLD_CONNECTION_WR_ASTID_V(
3852 PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
3858 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
	/*
	 * We initialize the MSS index in TCB to 0xF, so that when the driver
	 * sends the cpl_pass_accept_rpl the TCB picks up the correct value.
	 * If this were 0, TP would ignore any value > 0 for the MSS index.
	 */
3866 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
3867 req->cookie = (uintptr_t)skb;
3869 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
3870 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
3872 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
/*
 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
 * messages when a filter is being used instead of a server to
 * redirect a SYN packet. When packets hit the filter they are redirected
 * to the offload queue and the driver tries to establish the connection
 * using a firmware work request.
 */
3886 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3889 unsigned int filter;
3890 struct ethhdr *eh = NULL;
3891 struct vlan_ethhdr *vlan_eh = NULL;
3893 struct tcphdr *tcph;
3894 struct rss_header *rss = (void *)skb->data;
3895 struct cpl_rx_pkt *cpl = (void *)skb->data;
3896 struct cpl_pass_accept_req *req = (void *)(rss + 1);
3897 struct l2t_entry *e;
3898 struct dst_entry *dst;
3899 struct c4iw_ep *lep = NULL;
3901 struct port_info *pi;
3902 struct net_device *pdev;
3903 u16 rss_qid, eth_hdr_len;
3906 struct neighbour *neigh;
3908 /* Drop all non-SYN packets */
3909 if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
3916 if (!(rss->filter_hit && rss->filter_tid))
	/*
	 * Calculate the server tid from the filter hit index from cpl_rx_pkt.
	 */
3922 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
3924 lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
3926 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
3930 switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
3932 eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3935 eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3938 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3941 pr_err("T%d Chip is not supported\n",
3942 CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
3946 if (eth_hdr_len == ETH_HLEN) {
3947 eh = (struct ethhdr *)(req + 1);
3948 iph = (struct iphdr *)(eh + 1);
3950 vlan_eh = (struct vlan_ethhdr *)(req + 1);
3951 iph = (struct iphdr *)(vlan_eh + 1);
3952 skb->vlan_tci = ntohs(cpl->vlan);
3955 if (iph->version != 0x4)
3958 tcph = (struct tcphdr *)(iph + 1);
3959 skb_set_network_header(skb, (void *)iph - (void *)rss);
3960 skb_set_transport_header(skb, (void *)tcph - (void *)rss);
3963 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
3964 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
3965 ntohs(tcph->source), iph->tos);
3967 dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
3970 pr_err("%s - failed to find dst entry!\n",
3974 neigh = dst_neigh_lookup_skb(dst, skb);
3977 pr_err("%s - failed to allocate neigh!\n",
3982 if (neigh->dev->flags & IFF_LOOPBACK) {
3983 pdev = ip_dev_find(&init_net, iph->daddr);
3984 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3986 pi = (struct port_info *)netdev_priv(pdev);
3987 tx_chan = cxgb4_port_chan(pdev);
3990 pdev = get_real_dev(neigh->dev);
3991 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3993 pi = (struct port_info *)netdev_priv(pdev);
3994 tx_chan = cxgb4_port_chan(pdev);
3996 neigh_release(neigh);
3998 pr_err("%s - failed to allocate l2t entry!\n",
4003 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
4004 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
4005 window = (__force u16) htons((__force u16)tcph->window);
	/* Calculate filter portion for LE region. */
4008 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
4009 dev->rdev.lldi.ports[0],
	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
4017 build_cpl_pass_accept_req(skb, stid, iph->tos);
4018 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
4019 tcph->source, ntohl(tcph->seq), filter, window,
4020 rss_qid, pi->port_id);
4021 cxgb4_l2t_release(e);
4026 c4iw_put_ep(&lep->com);
/*
 * These are the real handlers that are called from a
 * work queue.
 */
4034 static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
4035 [CPL_ACT_ESTABLISH] = act_establish,
4036 [CPL_ACT_OPEN_RPL] = act_open_rpl,
4037 [CPL_RX_DATA] = rx_data,
4038 [CPL_ABORT_RPL_RSS] = abort_rpl,
4039 [CPL_ABORT_RPL] = abort_rpl,
4040 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
4041 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
4042 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
4043 [CPL_PASS_ESTABLISH] = pass_establish,
4044 [CPL_PEER_CLOSE] = peer_close,
4045 [CPL_ABORT_REQ_RSS] = peer_abort,
4046 [CPL_CLOSE_CON_RPL] = close_con_rpl,
4047 [CPL_RDMA_TERMINATE] = terminate,
4048 [CPL_FW4_ACK] = fw4_ack,
4049 [CPL_FW6_MSG] = deferred_fw6_msg,
4050 [CPL_RX_PKT] = rx_pkt,
4051 [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};
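/*
 * Handle an endpoint whose timer has expired: issue the appropriate
 * upcall for the current state, then abort the connection.
 */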
4055 static void process_timeout(struct c4iw_ep *ep)
4057 struct c4iw_qp_attributes attrs;
4060 mutex_lock(&ep->com.mutex);
4061 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
4063 set_bit(TIMEDOUT, &ep->com.history);
4064 switch (ep->com.state) {
4066 connect_reply_upcall(ep, -ETIMEDOUT);
4075 if (ep->com.cm_id && ep->com.qp) {
4076 attrs.next_state = C4IW_QP_STATE_ERROR;
4077 c4iw_modify_qp(ep->com.qp->rhp,
4078 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4081 close_complete_upcall(ep, -ETIMEDOUT);
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
4094 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
4095 __func__, ep, ep->hwtid, ep->com.state);
4098 mutex_unlock(&ep->com.mutex);
4100 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
4101 c4iw_put_ep(&ep->com);
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
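/*
 * Work-queue handler: drain the receive queue, dispatching each CPL
 * message to its handler, and process any endpoints that timed out in
 * the meantime.
 */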
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}
4146 static DECLARE_WORK(skb_work, process_work);
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/* Only insert if it is not already on the list. */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/* Save dev in the skb->cb area. */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/* Queue the skb and schedule the worker thread. */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u for tid %u\n",
		       rpl->status, GET_TID(rpl));
	kfree_skb(skb);
	return 0;
}
4199 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
4201 struct cpl_fw6_msg *rpl = cplhdr(skb);
4202 struct c4iw_wr_wait *wr_waitp;
4205 PDBG("%s type %u\n", __func__, rpl->type);
4207 switch (rpl->type) {
4208 case FW6_TYPE_WR_RPL:
4209 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
4210 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
4211 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
4213 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
4217 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
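/*
 * Interrupt-context peer-abort handler: wake up any thread blocked on
 * the endpoint, then defer the real teardown to peer_abort() via the
 * work queue.
 */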
4229 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
4231 struct cpl_abort_req_rss *req = cplhdr(skb);
4233 unsigned int tid = GET_TID(req);
4235 ep = get_ep_from_tid(dev, tid);
4236 /* This EP will be dereferenced in peer_abort() */
4238 printk(KERN_WARNING MOD
4239 "Abort on non-existent endpoint, tid %d\n", tid);
4243 if (is_neg_adv(req->status)) {
4244 PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
4245 __func__, ep->hwtid, req->status,
4246 neg_adv_str(req->status));
4249 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
4252 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
4262 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
4263 [CPL_ACT_ESTABLISH] = sched,
4264 [CPL_ACT_OPEN_RPL] = sched,
4265 [CPL_RX_DATA] = sched,
4266 [CPL_ABORT_RPL_RSS] = sched,
4267 [CPL_ABORT_RPL] = sched,
4268 [CPL_PASS_OPEN_RPL] = sched,
4269 [CPL_CLOSE_LISTSRV_RPL] = sched,
4270 [CPL_PASS_ACCEPT_REQ] = sched,
4271 [CPL_PASS_ESTABLISH] = sched,
4272 [CPL_PEER_CLOSE] = sched,
4273 [CPL_CLOSE_CON_RPL] = sched,
4274 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
4275 [CPL_RDMA_TERMINATE] = sched,
4276 [CPL_FW4_ACK] = sched,
4277 [CPL_SET_TCB_RPL] = set_tcb_rpl,
4278 [CPL_FW6_MSG] = fw6_msg,
4279 [CPL_RX_PKT] = sched
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);
	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;
	return 0;
}
void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}