/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include "iw_cxgb4.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");
int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		 " compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
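
/*
 * The knobs above are ordinary module parameters with 0644 permissions,
 * so they can be set at load time or adjusted later through sysfs.
 * Illustrative usage only (assuming the driver builds as iw_cxgb4.ko):
 *
 *	modprobe iw_cxgb4 mpa_rev=2 peer2peer=1 dack_mode=0
 *	echo 524288 > /sys/module/iw_cxgb4/parameters/rcv_win
 */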
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
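
/*
 * Note on the timer protocol: start_ep_timer() takes an extra reference
 * on the endpoint, dropped either in stop_ep_timer() (the normal stop)
 * or by the timeout path.  A non-zero return from stop_ep_timer() means
 * the TIMEOUT bit was already set, i.e. the timer fired first and
 * process_timeout() owns the teardown; callers such as
 * process_mpa_reply() use that return value to bail out early.
 */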
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
		print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
		iwpm_remove_mapinfo(&ep->com.local_addr,
				    &ep->com.mapped_local_addr);
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}
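
/*
 * Typical call pattern for get_skb(): callers holding an inbound skb pass
 * it in for reuse (e.g. get_skb(skb, wrlen, gfp)), while callers building
 * a fresh work request pass NULL and simply get an allocation.  Either
 * way the ARP error handler is cleared so a handler installed by a
 * previous user of the skb cannot fire on the new payload.
 */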
static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	struct net_device *phys_dev = egress_dev;
	if (egress_dev->priv_flags & IFF_802_1Q_VLAN)
		phys_dev = vlan_dev_real_dev(egress_dev);
	return phys_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}
static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}

static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
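
/*
 * The 80-byte flowclen above is not arbitrary: assuming the usual
 * fw_flowc_wr layout, the WR is an 8-byte header plus 9 mnemonic/value
 * pairs of 8 bytes each (8 + 9 * 8 = 80), already a multiple of 16, so
 * the ninth (zeroed) mnemonic doubles as the pad that keeps the work
 * request 16-byte aligned.
 */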
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * c4iw_form_pm_msg - Form a port mapper message with mapping info
 */
static void c4iw_form_pm_msg(struct c4iw_ep *ep,
			     struct iwpm_sa_data *pm_msg)
{
	memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
}

/*
 * c4iw_form_reg_msg - Form a port mapper message with dev info
 */
static void c4iw_form_reg_msg(struct c4iw_dev *dev,
			      struct iwpm_dev_data *pm_msg)
{
	memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
	memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
	       IWPM_IFNAME_SIZE);
}

static void c4iw_record_pm_msg(struct c4iw_ep *ep,
			       struct iwpm_sa_data *pm_msg)
{
	memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
	       sizeof(ep->com.mapped_remote_addr));
}
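
/*
 * Together these helpers cover the iWARP port-mapper exchange.  A
 * simplified sketch of the assumed calling sequence (error handling
 * omitted; the actual call sites live in the connect/listen paths):
 *
 *	c4iw_form_reg_msg(dev, &reg_msg);
 *	iwpm_register_pid(&reg_msg, RDMA_NL_C4IW);
 *	c4iw_form_pm_msg(ep, &pm_msg);
 *	iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
 *	c4iw_record_pm_msg(ep, &pm_msg);
 *
 * The mapped addresses recorded last are what send_connect() actually
 * puts on the wire.
 */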
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct cpl_t5_act_open_req *t5_req;
	struct cpl_act_open_req6 *req6;
	struct cpl_t5_act_open_req6 *t5_req6;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen;
	int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
			sizeof(struct cpl_act_open_req) :
			sizeof(struct cpl_t5_act_open_req);
	int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
			sizeof(struct cpl_act_open_req6) :
			sizeof(struct cpl_t5_act_open_req6);
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.mapped_local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.mapped_remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_remote_addr;

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
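	/*
	 * Assuming the compute_wscale() helper from iw_cxgb4.h, wscale is
	 * the smallest shift that makes the advertised window fit in the
	 * 16-bit TCP window field.  With the default rcv_win of 256KB:
	 * 65535 << 2 < 262144 but 65535 << 3 >= 262144, so wscale = 3 and
	 * the raw advertised window is 262144 >> 3 = 32768.
	 */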
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       CCTRL_ECN(enable_ecn) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID;
		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
	}
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			OPCODE_TID(req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			req->local_port = la->sin_port;
			req->peer_port = ra->sin_port;
			req->local_ip = la->sin_addr.s_addr;
			req->peer_ip = ra->sin_addr.s_addr;
			req->opt0 = cpu_to_be64(opt0);
			req->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);

			INIT_TP_WR(req6, 0);
			OPCODE_TID(req6) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
			req6->local_port = la6->sin6_port;
			req6->peer_port = ra6->sin6_port;
			req6->local_ip_hi = *((__be64 *)
					      (la6->sin6_addr.s6_addr));
			req6->local_ip_lo = *((__be64 *)
					      (la6->sin6_addr.s6_addr + 8));
			req6->peer_ip_hi = *((__be64 *)
					     (ra6->sin6_addr.s6_addr));
			req6->peer_ip_lo = *((__be64 *)
					     (ra6->sin6_addr.s6_addr + 8));
			req6->opt0 = cpu_to_be64(opt0);
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		}
	} else {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			t5_req = (struct cpl_t5_act_open_req *)
				 skb_put(skb, wrlen);
			INIT_TP_WR(t5_req, 0);
			OPCODE_TID(t5_req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			t5_req->local_port = la->sin_port;
			t5_req->peer_port = ra->sin_port;
			t5_req->local_ip = la->sin_addr.s_addr;
			t5_req->peer_ip = ra->sin_addr.s_addr;
			t5_req->opt0 = cpu_to_be64(opt0);
			t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
						cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t)));
			t5_req->opt2 = cpu_to_be32(opt2);
		} else {
			t5_req6 = (struct cpl_t5_act_open_req6 *)
				  skb_put(skb, wrlen);
			INIT_TP_WR(t5_req6, 0);
			OPCODE_TID(t5_req6) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
			t5_req6->local_port = la6->sin6_port;
			t5_req6->peer_port = ra6->sin6_port;
			t5_req6->local_ip_hi = *((__be64 *)
						 (la6->sin6_addr.s6_addr));
			t5_req6->local_ip_lo = *((__be64 *)
						 (la6->sin6_addr.s6_addr + 8));
			t5_req6->peer_ip_hi = *((__be64 *)
						(ra6->sin6_addr.s6_addr));
			t5_req6->peer_ip_lo = *((__be64 *)
						(ra6->sin6_addr.s6_addr + 8));
			t5_req6->opt0 = cpu_to_be64(opt0);
			t5_req6->params = (__force __be64)cpu_to_be32(
					cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			t5_req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
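
/*
 * Note the T4/T5 split above: the opt0/opt2 encoding is shared, but the
 * ntuple returned by cxgb4_select_ntuple() travels differently per chip.
 * On T4 it lands in a 32-bit params field, while T5 carries it in a
 * wider field wrapped by V_FILTER_TUPLE(), which is why the separate
 * cpl_t5_act_open_req{,6} request layouts exist at all.
 */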
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return;
}
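
/*
 * MPA framing used above (per the mpa_message layout): a 16-byte key,
 * one flags byte (CRC/MARKERS/ENHANCED_RDMA_CONN), a revision byte and
 * a 16-bit private data length, followed by the private data itself.
 * For MPA v2 the first bytes of the private data carry the
 * mpa_v2_conn_params {ird, ord} pair, which is why mpalen is grown by
 * sizeof(struct mpa_v2_conn_params) whenever revision 2 is negotiated.
 */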
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
}
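
/*
 * At this point the active-open sequence is: send_connect() ->
 * CPL_ACT_ESTABLISH (handled here) -> FLOWC work request -> MPA request.
 * The atid used to initiate the connection has been traded in for the
 * hardware tid the chip assigned; everything from here on runs on hwtid.
 */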
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}
static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
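
/*
 * rx_data() calls this with the payload length it just consumed, so the
 * credits returned here restore exactly the window the streaming-mode
 * payload used up; the same CPL also re-asserts the configured delayed
 * ack behaviour via F_RX_DACK_CHANGE/V_RX_DACK_MODE.
 */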
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err:
	__state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer)
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	/*
	 * If the endpoint timer already expired, then we ignore
	 * the start request.  process_timeout() will abort
	 * the connection.
	 */
	if (!stop_ep_timer(ep)) {
		__state_set(&ep->com, MPA_REQ_RCVD);

		/* drive upcall */
		mutex_lock(&ep->parent_ep->com.mutex);
		if (ep->parent_ep->com.state != DEAD) {
			if (connect_request_upcall(ep))
				abort_connection(ep, skb, GFP_KERNEL);
		} else {
			abort_connection(ep, skb, GFP_KERNEL);
		}
		mutex_unlock(&ep->parent_ep->com.mutex);
	}
	return;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = lookup_tid(t, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}
static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
			V_FW_OFLD_CONNECTION_WR_ASTID(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
		(nocong ? NO_CONG(1) : 0) |
		KEEP_ALIVE(1) |
		DELACK(1) |
		WND_SCALE(wscale) |
		MSS_IDX(mtu_idx) |
		L2T_IDX(ep->l2t->idx) |
		TX_CHAN(ep->tx_chan) |
		SMAC_SEL(ep->smac_idx) |
		DSCP(ep->tos) |
		ULP_MODE(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ(rcv_win >> 10));
	req->tcb.opt2 = (__force __be32) (PACE(1) |
		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL(0) |
		CCTRL_ECN(enable_ecn) |
		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/* Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}
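
/*
 * "Negative advice" statuses are hints from the hardware (retransmit or
 * keepalive trouble) that the peer may have gone away; act_open_rpl()
 * below just logs such statuses and leaves the connection alone instead
 * of tearing it down.
 */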
#define ACT_OPEN_RETRY_COUNT 2
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(n->dev);
		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(n->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(n->dev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(n->dev) * step];
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}
	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->remote_addr;
	__u8 *ra;
	int iptype;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (is_neg_adv(status)) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			send_fw_act_open_req(ep,
					     GET_TID_TID(GET_AOPEN_ATID(
					     ntohl(rpl->atid_status))));
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

out:
	return 0;
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}
static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos >> 2) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
			G_IP_HDR_LEN(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN(1);
	}
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID;
		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
	}

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}
static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
		       __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;
	return;
}

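/*
 * CPL_PASS_ACCEPT_REQ: an incoming SYN matched one of our server TIDs.
 * Allocate and initialize a child endpoint, resolve the return route and
 * L2T entry, then answer with accept_cr(); any failure funnels through
 * reject_cr() so the hardware TID gets released.
 */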
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;

	parent_ep = lookup_stid(t, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
				 GET_POPEN_TOS(ntohl(req->tos_stid)));
	} else {
		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
				  PASS_OPEN_TOS(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				  &parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	if (peer_mss && child_ep->mtu > (peer_mss + 40))
		child_ep->mtu = peer_mss + 40;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;
		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
			&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
	accept_cr(child_ep, skb, req);
	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	goto out;
reject:
	reject_cr(dev, hwtid, skb);
out:
	return 0;
}

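/*
 * CPL_PASS_ESTABLISH: the passive three-way handshake completed.  Record
 * the initial sequence numbers, derive the emss from the peer's TCP
 * options, and move to MPA_REQ_WAIT to wait for the client's MPA start
 * message.
 */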
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);
	set_bit(PASS_ESTAB, &ep->com.history);

	return 0;
}

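/*
 * CPL_PEER_CLOSE: the peer sent a FIN.  What happens next depends on where
 * the endpoint is in its life cycle, hence the state switch below; the
 * common outcome is CLOSING plus a half-close of our own side.
 */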
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}

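/*
 * CPL_ABORT_REQ_RSS: the peer reset the connection, or the hardware sent a
 * negative advice.  Move a bound QP to ERROR, notify the ULP, and always
 * answer with an ABORT_RPL carrying CPL_ABORT_NO_RST.
 */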
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_put_ep(&ep->com);
	}

	return 0;
}

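/*
 * CPL_CLOSE_CON_RPL: our half-close completed.  In MORIBUND both sides are
 * now closed, so idle the QP, signal close-complete to the ULP and release
 * the endpoint's resources.
 */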
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}

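/*
 * CPL_RDMA_TERMINATE: the peer sent an RDMA terminate message; push the QP
 * to TERMINATE so the provider can run its termination processing.
 */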
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);
	BUG_ON(!ep);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

	return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}

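/*
 * iw_cm reject verb: called by the ULP to refuse an MPA connect request.
 * With mpa_rev 0 we can only abort; otherwise we send an MPA reject reply
 * (with optional private data) and then disconnect.
 */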
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err = 0;
	int disconnect = 0;
	struct c4iw_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		disconnect = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}

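/*
 * iw_cm accept verb: validate the requested IRD/ORD against both our
 * limits and what the peer advertised (MPA v2 enhanced mode), bind the QP
 * to the endpoint, move it to RTS and send the MPA reply.
 */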
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			ep->ird = conn_param->ird;
			ep->ord = conn_param->ord;
			send_mpa_reject(ep, conn_param->private_data,
					conn_param->private_data_len);
			abort_connection(ep, NULL, GFP_KERNEL);
			err = -ENOMEM;
			goto err;
		}
		if (conn_param->ird > ep->ord) {
			if (!ep->ord)
				conn_param->ird = 1;
			else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version != 2)
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;
	ref_qp(ep);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	cm_id->rem_ref(cm_id);
err:
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return err;
}

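/*
 * Loopback connects to INADDR_ANY have no usable destination address, so
 * borrow the primary address of port 0 for both sides of the connection.
 */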
static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}

static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}

static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;

	/* get_lladdr() returns 0 on success, so the result must be negated */
	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}

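/*
 * iw_cm connect verb (active open): allocate an endpoint and an atid,
 * optionally register the connection with the iWARP port mapper, resolve a
 * route and L2T entry, then kick off the TCP SYN via send_connect().
 */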
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	__u8 *ra;
	int iptype;
	int iwpm_err = 0;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail1;
	}
	ref_qp(ep);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.remote_addr));

	/* No port mapper available, go with the specified peer information */
	memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.mapped_remote_addr));

	c4iw_form_reg_msg(dev, &pm_reg_msg);
	iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
	if (iwpm_err) {
		PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
		     __func__, iwpm_err);
	}
	if (iwpm_valid_pid() && !iwpm_err) {
		c4iw_form_pm_msg(ep, &pm_msg);
		iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
		if (iwpm_err)
			PDBG("%s: Port Mapper query fail (err = %d).\n",
			     __func__, iwpm_err);
		else
			c4iw_record_pm_msg(ep, &pm_msg);
	}
	if (iwpm_create_mapinfo(&ep->com.local_addr,
				&ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
		err = -ENOMEM;
		goto fail2;
	}
	print_addr(&ep->com, __func__, "add_query/create_mapinfo");
	set_bit(RELEASE_MAPINFO, &ep->com.flags);

	laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
	raddr6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;

	if (cm_id->remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
		     ra, ntohs(raddr->sin_port));
		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
		     __func__, laddr6->sin6_addr.s6_addr,
		     ntohs(laddr6->sin6_port),
		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail2;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail3;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail3:
	dst_release(ep->dst);
fail2:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail1:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

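/*
 * Program an IPv6 server (listening) TID and wait for the firmware's
 * CPL_PASS_OPEN_RPL, which pass_open_rpl() turns into our wakeup.
 */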
static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &ep->com.mapped_local_addr;

	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	if (err)
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	return err;
}

static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &ep->com.mapped_local_addr;

	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
		       , err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}

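/*
 * iw_cm listen verb: allocate a server TID (a filter-capable sftid when
 * fw_ofld_conn is enabled for IPv4), register the mapping with the port
 * mapper if one is running, then program the hardware server.
 */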
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	int iwpm_err = 0;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->local_addr.ss_family, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);

	/* No port mapper available, go with the specified info */
	memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
	       sizeof(ep->com.mapped_local_addr));

	c4iw_form_reg_msg(dev, &pm_reg_msg);
	iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
	if (iwpm_err) {
		PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
		     __func__, iwpm_err);
	}
	if (iwpm_valid_pid() && !iwpm_err) {
		memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
		       sizeof(ep->com.local_addr));
		iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
		if (iwpm_err)
			PDBG("%s: Port Mapper query fail (err = %d).\n",
			     __func__, iwpm_err);
		else
			memcpy(&ep->com.mapped_local_addr,
			       &pm_msg.mapped_loc_addr,
			       sizeof(ep->com.mapped_local_addr));
	}
	if (iwpm_create_mapinfo(&ep->com.local_addr,
				&ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
		err = -ENOMEM;
		goto fail3;
	}
	print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");

	set_bit(RELEASE_MAPINFO, &ep->com.flags);
	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}

fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}

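/*
 * Initiate a close or abort of the connection, depending on "abrupt" and
 * the current endpoint state.  On fatal adapter errors we skip the wire
 * protocol entirely and just reclaim resources.
 */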
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep, NULL, gfp);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep, gfp);
		}
		if (ret)
			fatal = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

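/*
 * Completion of an active FW_OFLD_CONNECTION_WR.  Transient failures
 * (FW_ENOMEM/FW_EADDRINUSE) are retried up to ACT_OPEN_RETRY_COUNT times;
 * anything else fails the connect upcall and tears the endpoint down.
 */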
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}

static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	BUG_ON(!rpl_skb);
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
	return;
}

static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}

static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	u32 l2info;
	u16 vlantag, len, hdr_len, eth_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = (__force u16) cpl->vlan;
	len = (__force u16) cpl->len;
	l2info = (__force u32) cpl->l2info;
	hdr_len = (__force u16) cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
			 V_SYN_MAC_IDX(G_RX_MACIDX(
			 (__force int) htonl(l2info))) |
			 F_SYN_XACT_MATCH);
	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
			    G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
			    G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
					(__force int) htonl(l2info))) |
				   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
					(__force int) htons(hdr_len))) |
				   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
					(__force int) htons(hdr_len))) |
				   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
	req->vlan = (__force __be16) vlantag;
	req->len = (__force __be16) len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
				    PASS_OPEN_TOS(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
	return;
}

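/*
 * Hand the synthesized SYN to the firmware as a FW_OFLD_CONNECTION_WR so
 * it can instantiate a TID for a connection that arrived via a filter
 * rather than a server TID.  The original skb is parked in req->cookie
 * until the work request completes.
 */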
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
			V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
			V_FW_OFLD_CONNECTION_WR_ASTID(
			GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));

	/*
	 * We initialize the MSS index in the TCB to 0xF so that when the
	 * driver sends the cpl_pass_accept_rpl, the TCB picks up the correct
	 * value.  If this were 0, TP would ignore any value > 0 for the
	 * MSS index.
	 */
	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
	req->cookie = (unsigned long)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}

/*
 * Handler for CPL_RX_PKT messages.  These arrive when a filter, rather
 * than a server TID, is being used to redirect a SYN packet: packets that
 * hit the filter are steered to the offload queue, and the driver then
 * tries to establish the connection using a firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
			    G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
			    G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate filter portion for LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt
};

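/*
 * Runs from the work queue when an endpoint timer fires: report the
 * timeout to the ULP based on the state the endpoint was in, then abort
 * the connection.
 */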
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:

		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 1;
	}
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		kfree_skb(skb);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, if we are on MPAv2 and want to retry with MPAv1
	 * then, don't wake up yet.
	 */
	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
		if (ep->com.state != MPA_REQ_SENT)
			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	} else
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	sched(dev, skb);
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};

int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}