/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");
int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
		 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
		 "in seconds (default=60)");
static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		 " compliant (default=1)");
static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
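
/*
 * Illustrative sketch, not driver code: the TIMEOUT bit arbitrates the
 * endpoint reference taken by start_ep_timer().  Whichever of
 * stop_ep_timer() and the expiry path wins test_and_set_bit() drops that
 * reference exactly once.  The handler name below is hypothetical; the
 * real expiry path lives in ep_timeout()/process_timeout() elsewhere in
 * this file.
 */
#if 0	/* example only */
static void example_expiry_side(struct c4iw_ep *ep)
{
	/* Mirror image of stop_ep_timer(): if the bit was still clear,
	 * the expiry path owns the timer's reference and must put it. */
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
		c4iw_put_ep(&ep->com);
}
#endif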
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
		   sizeof(struct iphdr) - sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
		print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
		iwpm_remove_mapinfo(&ep->com.local_addr,
				    &ep->com.mapped_local_addr);
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
	}
	kfree(ep);
}
static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}
static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}
static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}
static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}
static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
	connect_reply_upcall(ep, -EHOSTUNREACH);
	state_set(&ep->com, DEAD);
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
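
/*
 * Worked example, not driver code: the FLOWC WR above is a fixed 80
 * bytes (header plus 9 mnemonic/value slots), so FW_WR_LEN16 carries
 * DIV_ROUND_UP(80, 16) = 5 16-byte units; the zeroed 9th slot is what
 * pads the WR to that boundary.
 */
#if 0	/* example only */
static unsigned int example_flowc_len16(unsigned int flowclen)
{
	return DIV_ROUND_UP(flowclen, 16);	/* 80 bytes -> 5 units */
}
#endif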
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * c4iw_form_pm_msg - Form a port mapper message with mapping info
 */
static void c4iw_form_pm_msg(struct c4iw_ep *ep,
			     struct iwpm_sa_data *pm_msg)
{
	memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
}

/*
 * c4iw_form_reg_msg - Form a port mapper message with dev info
 */
static void c4iw_form_reg_msg(struct c4iw_dev *dev,
			      struct iwpm_dev_data *pm_msg)
{
	memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
	memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
	       IWPM_IFNAME_SIZE);
}

static void c4iw_record_pm_msg(struct c4iw_ep *ep,
			       struct iwpm_sa_data *pm_msg)
{
	memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
	       sizeof(ep->com.mapped_remote_addr));
}
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts)
{
	unsigned short hdr_size = sizeof(struct iphdr) +
				  sizeof(struct tcphdr) +
				  (use_ts ? 12 : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}
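
/*
 * Worked example, not driver code: with TCP timestamps enabled the
 * options add 12 bytes, so best_mtu() asks the LLD for a firmware MTU
 * whose payload (mtu - 20 IP - 20 TCP - 12 tstamp) is 8-byte aligned,
 * which keeps the resulting emss aligned as later checked in
 * set_emss().
 */
#if 0	/* example only */
static unsigned short example_hdr_size(int use_ts)
{
	return sizeof(struct iphdr) + sizeof(struct tcphdr) +
	       (use_ts ? 12 : 0);		/* 40 or 52 bytes */
}
#endif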
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct cpl_t5_act_open_req *t5_req;
	struct cpl_act_open_req6 *req6;
	struct cpl_t5_act_open_req6 *t5_req6;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen;
	int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
		     sizeof(struct cpl_act_open_req) :
		     sizeof(struct cpl_t5_act_open_req);
	int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
		     sizeof(struct cpl_act_open_req6) :
		     sizeof(struct cpl_t5_act_open_req6);
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.mapped_local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.mapped_remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_remote_addr;
	int win;

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
		roundup(sizev4, 16) :
		roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_MASK)
		win = RCV_BUFSIZ_MASK;

	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(win);
	opt2 = RX_CHANNEL(0) |
	       CCTRL_ECN(enable_ecn) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID;
		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
	}
	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			OPCODE_TID(req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			req->local_port = la->sin_port;
			req->peer_port = ra->sin_port;
			req->local_ip = la->sin_addr.s_addr;
			req->peer_ip = ra->sin_addr.s_addr;
			req->opt0 = cpu_to_be64(opt0);
			req->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			OPCODE_TID(req6) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
			req6->local_port = la6->sin6_port;
			req6->peer_port = ra6->sin6_port;
			req6->local_ip_hi = *((__be64 *)
					      (la6->sin6_addr.s6_addr));
			req6->local_ip_lo = *((__be64 *)
					      (la6->sin6_addr.s6_addr + 8));
			req6->peer_ip_hi = *((__be64 *)
					     (ra6->sin6_addr.s6_addr));
			req6->peer_ip_lo = *((__be64 *)
					     (ra6->sin6_addr.s6_addr + 8));
			req6->opt0 = cpu_to_be64(opt0);
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		}
	} else {
		u32 isn = (prandom_u32() & ~7UL) - 1;

		opt2 |= T5_OPT_2_VALID;
		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */

		if (ep->com.remote_addr.ss_family == AF_INET) {
			t5_req = (struct cpl_t5_act_open_req *)
				 skb_put(skb, wrlen);
			INIT_TP_WR(t5_req, 0);
			OPCODE_TID(t5_req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			t5_req->local_port = la->sin_port;
			t5_req->peer_port = ra->sin_port;
			t5_req->local_ip = la->sin_addr.s_addr;
			t5_req->peer_ip = ra->sin_addr.s_addr;
			t5_req->opt0 = cpu_to_be64(opt0);
			t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
					cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t)));
			t5_req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req->rsvd));
			t5_req->opt2 = cpu_to_be32(opt2);
		} else {
			t5_req6 = (struct cpl_t5_act_open_req6 *)
				  skb_put(skb, wrlen);
			INIT_TP_WR(t5_req6, 0);
			OPCODE_TID(t5_req6) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
			t5_req6->local_port = la6->sin6_port;
			t5_req6->peer_port = ra6->sin6_port;
			t5_req6->local_ip_hi = *((__be64 *)
						 (la6->sin6_addr.s6_addr));
			t5_req6->local_ip_lo = *((__be64 *)
						 (la6->sin6_addr.s6_addr + 8));
			t5_req6->peer_ip_hi = *((__be64 *)
						(ra6->sin6_addr.s6_addr));
			t5_req6->peer_ip_lo = *((__be64 *)
						(ra6->sin6_addr.s6_addr + 8));
			t5_req6->opt0 = cpu_to_be64(opt0);
			t5_req6->params = (__force __be64)cpu_to_be32(
					cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			t5_req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req6->rsvd));
			t5_req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
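
/*
 * compute_wscale() is defined elsewhere (iw_cxgb4.h); a minimal sketch
 * consistent with its use above: pick the smallest TCP window-scale
 * shift (at most 14) that lets the 16-bit window field advertise
 * rcv_win bytes.
 */
#if 0	/* example only */
static int example_compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;			/* 256KB -> 3 */
}
#endif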
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
			FW_WR_OP(FW_OFLD_TX_DATA_WR) |
			FW_WR_COMPL(1) |
			FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
			FW_WR_FLOWID(ep->hwtid) |
			FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
			FW_OFLD_TX_DATA_WR_FLUSH(1) |
			FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
		     ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return;
}
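
/*
 * Sketch of the MPA start-frame length computed above, not driver
 * code: an MPAv2 request carries the v2 conn params immediately after
 * the fixed header, then the ULP private data, and private_data_size
 * counts both.
 */
#if 0	/* example only */
static int example_mpalen(u8 mpa_rev_to_use, u8 plen)
{
	int len = sizeof(struct mpa_message) + plen;

	if (mpa_rev_to_use == 2)	/* v2 params ride in private data */
		len += sizeof(struct mpa_v2_conn_params);
	return len;
}
#endif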
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
			FW_WR_OP(FW_OFLD_TX_DATA_WR) |
			FW_WR_COMPL(1) |
			FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
			FW_WR_FLOWID(ep->hwtid) |
			FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
			FW_OFLD_TX_DATA_WR_FLUSH(1) |
			FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
			FW_WR_OP(FW_OFLD_TX_DATA_WR) |
			FW_WR_COMPL(1) |
			FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
			FW_WR_FLOWID(ep->hwtid) |
			FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
			FW_OFLD_TX_DATA_WR_FLUSH(1) |
			FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
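
/*
 * Sketch, not driver code, of the MPAv2 ird/ord encoding shared by the
 * three senders above: the low bits (MPA_V2_IRD_ORD_MASK) carry the
 * count, the top bits of ird flag the peer2peer model, and the top
 * bits of ord select the RTR opcode.
 */
#if 0	/* example only */
static void example_encode_ird_ord(struct mpa_v2_conn_params *p,
				   u16 ird, u16 ord, int use_p2p)
{
	p->ird = htons(ird & MPA_V2_IRD_ORD_MASK);
	p->ord = htons(ord & MPA_V2_IRD_ORD_MASK);
	if (use_p2p) {
		p->ird |= htons(MPA_V2_PEER2PEER_MODEL);
		p->ord |= htons(MPA_V2_RDMA_READ_RTR);	/* or WRITE_RTR */
	}
}
#endif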
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}
static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
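
/*
 * Worked example, not driver code: opt0's RCV_BUFSIZ field advertises
 * the window in 1KB units and saturates at RCV_BUFSIZ_MASK, so any
 * configured rcv_win beyond RCV_BUFSIZ_MASK * 1024 bytes can only be
 * opened later, by folding the overage into the first CPL_RX_DATA_ACK
 * credits as done above.
 */
#if 0	/* example only */
static u32 example_first_ack_credits(u32 credits, u32 rcv_win)
{
	if (rcv_win > RCV_BUFSIZ_MASK * 1024)
		credits += rcv_win - RCV_BUFSIZ_MASK * 1024;
	return credits;
}
#endif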
#define RELAXED_IRD_NEGOTIATION 1

static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer. If it expired, then
	 * we ignore the MPA reply. process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
					(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				   MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				   MPA_V2_IRD_ORD_MASK;
			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator provided ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err:
	__state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
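
/*
 * Worked example, not driver code, of the RELAXED_IRD_NEGOTIATION
 * resize above: if we asked for ird=8 and the responder replies with
 * ord=16, relaxed mode grows our ird to 16 (provided the adapter's
 * max_ordird_qp allows it); strict mode would instead flag
 * insufficient IRD and TERM the connection.
 */
#if 0	/* example only */
static int example_relax_ird(u32 *my_ird, u16 resp_ord, u32 max_ordird_qp)
{
	if (*my_ird < resp_ord) {
		if (RELAXED_IRD_NEGOTIATION && resp_ord <= max_ordird_qp) {
			*my_ird = resp_ord;	/* grow to match the peer */
			return 0;
		}
		return -ENOMEM;			/* insufficient IRD */
	}
	return 0;
}
#endif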
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
					(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				  MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				  MPA_V2_IRD_ORD_MASK;
			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
			     ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	/*
	 * If the endpoint timer already expired, then we ignore
	 * the start request. process_timeout() will abort
	 * the connection.
	 */
	if (!stop_ep_timer(ep)) {
		__state_set(&ep->com, MPA_REQ_RCVD);

		/* drive upcall */
		mutex_lock(&ep->parent_ep->com.mutex);
		if (ep->parent_ep->com.state != DEAD) {
			if (connect_request_upcall(ep))
				abort_connection(ep, skb, GFP_KERNEL);
		} else {
			abort_connection(ep, skb, GFP_KERNEL);
		}
		mutex_unlock(&ep->parent_ep->com.mutex);
	}
	return;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = lookup_tid(t, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}
static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
		htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
		      V_FW_OFLD_CONNECTION_WR_ASTID(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
		htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_MASK)
		win = RCV_BUFSIZ_MASK;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
		(nocong ? NO_CONG(1) : 0) |
		KEEP_ALIVE(1) |
		DELACK(1) |
		WND_SCALE(wscale) |
		MSS_IDX(mtu_idx) |
		L2T_IDX(ep->l2t->idx) |
		TX_CHAN(ep->tx_chan) |
		SMAC_SEL(ep->smac_idx) |
		DSCP(ep->tos) |
		ULP_MODE(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ(win));
	req->tcb.opt2 = (__force __be32) (PACE(1) |
		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL(0) |
		CCTRL_ECN(enable_ecn) |
		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/* Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}
static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}
static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}
#define ACT_OPEN_RETRY_COUNT 2

static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
		       cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
		       cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
		       cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
		       cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}
	err = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return err;
}
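
/*
 * Worked example, not driver code, of the queue spreading done in
 * import_ep(): with ntxq=16 TX queues over nchan=4 channels, step is
 * 4 and port index 2 gets txq_idx 8; the RSS qid is taken from the
 * same per-port slice of rxq_ids.
 */
#if 0	/* example only */
static int example_txq_idx(int port_idx, int ntxq, int nchan)
{
	int step = ntxq / nchan;	/* queues per port */

	return port_idx * step;		/* 2 * 4 = 8 */
}
#endif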
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->remote_addr;
	__u8 *ra;
	int iptype;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (is_neg_adv(status)) {
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "Connection problems for atid %u status %u (%s)\n",
			 atid, status, neg_adv_str(status));
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			send_fw_act_open_req(ep,
					     GET_TID_TID(GET_AOPEN_ATID(
					     ntohl(rpl->atid_status))));
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

out:
	return 0;
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}
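/*
 * Build and send the CPL_PASS_ACCEPT_RPL that tells the hardware to
 * complete the passive-open 3-way handshake. opt0/opt2 carry the TCP
 * options (window scale, timestamps, SACK, ECN) negotiated from the
 * incoming SYN and from the module parameters.
 */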
static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;
	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
	int win;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));

	skb_get(skb);
	rpl = cplhdr(skb);
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		skb_trim(skb, roundup(sizeof(*rpl5), 16));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps && req->tcpopt.tstamp);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_MASK)
		win = RCV_BUFSIZ_MASK;
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos >> 2) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(win);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
		       G_IP_HDR_LEN(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN(1);
	}
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		u32 isn = (prandom_u32() & ~7UL) - 1;
		opt2 |= T5_OPT_2_VALID;
		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
		rpl5 = (void *)rpl;
		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
		if (peer2peer)
			isn += 4;
		rpl5->iss = cpu_to_be32(isn);
		PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
	}

	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
	return;
}
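/*
 * Extract the local/peer IP addresses and TCP ports from the Ethernet,
 * IP, and TCP headers that follow the cpl_pass_accept_req message.
 */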
static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
		       __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;
	return;
}
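/*
 * Handle an incoming connect request (CPL_PASS_ACCEPT_REQ): find the
 * listening endpoint, resolve a route back to the peer, allocate and
 * initialize a child endpoint, and accept the connection in hardware.
 */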
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;
	unsigned short hdrs;

	parent_ep = lookup_stid(t, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
				 GET_POPEN_TOS(ntohl(req->tos_stid)));
	} else {
		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
				  PASS_OPEN_TOS(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				  &parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
		child_ep->mtu = peer_mss + hdrs;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;
		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
			&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
	accept_cr(child_ep, skb, req);
	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	goto out;
reject:
	reject_cr(dev, hwtid, skb);
out:
	return 0;
}
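/*
 * CPL_PASS_ESTABLISH: the 3-way handshake completed on a passive
 * connection. Record the send/receive sequence numbers, send the flowc,
 * and wait for the peer's MPA start request.
 */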
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);
	set_bit(PASS_ESTAB, &ep->com.history);

	return 0;
}
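/*
 * The peer sent a FIN (CPL_PEER_CLOSE): advance the connection state
 * machine and, in FPDU_MODE, move the QP to CLOSING and deliver the
 * disconnect event to the ULP.
 */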
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}
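/*
 * Handle a peer abort (CPL_ABORT_REQ_RSS): move the QP to ERROR where
 * needed, notify the ULP, and reply with CPL_ABORT_RPL so the hardware
 * can release the connection. An MPAv2 endpoint may instead retry the
 * connection with MPAv1.
 */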
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv(req->status)) {
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "Negative advice on abort - tid %u status %d (%s)\n",
			 ep->hwtid, req->status, neg_adv_str(req->status));
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);
	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}
	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);
	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}
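/*
 * The ULP rejected the connect request: send an MPA reject (or abort
 * the connection for MPA rev 0) and tear the endpoint down.
 */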
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err = 0;
	int disconnect = 0;
	struct c4iw_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		disconnect = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
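/*
 * The ULP accepted the connect request: finalize the IRD/ORD
 * negotiation, bind the QP to the endpoint, move the QP to RTS, and
 * send the MPA reply. With RELAXED_IRD_NEGOTIATION the responder may
 * shrink its ORD to the peer's IRD instead of failing the accept.
 */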
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				ep->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
			ep->ird = 1;
	}

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;
	ref_qp(ep);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	abort_connection(ep, NULL, GFP_KERNEL);
	cm_id->rem_ref(cm_id);
err:
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return err;
}
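/*
 * For loopback connects to INADDR_ANY, pick the primary IPv4 address
 * of port 0 and use it for both the local and remote sides.
 */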
static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}
static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;

	/* get_lladdr() returns 0 when it found a usable link-local address */
	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}
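/*
 * Initiate an active open: allocate an endpoint and an atid, register
 * with the port mapper if one is available, resolve a route to the
 * peer, and send the connect request to the hardware.
 */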
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	__u8 *ra;
	int iptype;
	int iwpm_err = 0;

	if ((conn_param->ord > cur_max_read_depth(dev)) ||
	    (conn_param->ird > cur_max_read_depth(dev))) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail2;
	}
	ref_qp(ep);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.remote_addr));

	/* No port mapper available, go with the specified peer information */
	memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.mapped_remote_addr));

	c4iw_form_reg_msg(dev, &pm_reg_msg);
	iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
	if (iwpm_err) {
		PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
		     __func__, iwpm_err);
	}
	if (iwpm_valid_pid() && !iwpm_err) {
		c4iw_form_pm_msg(ep, &pm_msg);
		iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
		if (iwpm_err)
			PDBG("%s: Port Mapper query fail (err = %d).\n",
			     __func__, iwpm_err);
		else
			c4iw_record_pm_msg(ep, &pm_msg);
	}
	if (iwpm_create_mapinfo(&ep->com.local_addr,
				&ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
		err = -ENOMEM;
		goto fail3;
	}
	print_addr(&ep->com, __func__, "add_query/create_mapinfo");
	set_bit(RELEASE_MAPINFO, &ep->com.flags);

	laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
	raddr6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;

	if (cm_id->remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail3;
		}

		/* find a route */
		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
		     ra, ntohs(raddr->sin_port));
		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail3;
		}

		/* find a route */
		PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
		     __func__, laddr6->sin6_addr.s6_addr,
		     ntohs(laddr6->sin6_port),
		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &ep->com.mapped_local_addr;

	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	if (err)
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	return err;
}
static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &ep->com.mapped_local_addr;

	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
		       , err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}
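/*
 * Create a listening endpoint: allocate a server TID, register the
 * mapped address with the port mapper if one is available, and program
 * the hardware (via a server entry or, with fw_ofld_conn, a server
 * filter for IPv4).
 */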
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	int iwpm_err = 0;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->local_addr.ss_family, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);

	/* No port mapper available, go with the specified info */
	memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
	       sizeof(ep->com.mapped_local_addr));

	c4iw_form_reg_msg(dev, &pm_reg_msg);
	iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
	if (iwpm_err) {
		PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
		     __func__, iwpm_err);
	}
	if (iwpm_valid_pid() && !iwpm_err) {
		memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
		       sizeof(ep->com.local_addr));
		iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
		if (iwpm_err)
			PDBG("%s: Port Mapper query fail (err = %d).\n",
			     __func__, iwpm_err);
		else
			memcpy(&ep->com.mapped_local_addr,
			       &pm_msg.mapped_loc_addr,
			       sizeof(ep->com.mapped_local_addr));
	}
	if (iwpm_create_mapinfo(&ep->com.local_addr,
				&ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
		err = -ENOMEM;
		goto fail3;
	}
	print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");

	set_bit(RELEASE_MAPINFO, &ep->com.flags);
	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}

fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
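/*
 * Start an orderly close (half-close) or an abortive close of the
 * connection, depending on 'abrupt'. On a fatal adapter error the
 * endpoint resources are released immediately.
 */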
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep, NULL, gfp);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep, gfp);
		}
		if (ret)
			fatal = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}
static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	BUG_ON(!rpl_skb);
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
	return;
}
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	u32 l2info;
	u16 vlantag, len, hdr_len, eth_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = (__force u16) cpl->vlan;
	len = (__force u16) cpl->len;
	l2info = (__force u32) cpl->l2info;
	hdr_len = (__force u16) cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate the cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
			 V_SYN_MAC_IDX(G_RX_MACIDX(
			 (__force int) htonl(l2info))) |
			 F_SYN_XACT_MATCH);
	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
		      G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
		      G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
					(__force int) htonl(l2info))) |
				   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
					(__force int) htons(hdr_len))) |
				   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
					(__force int) htons(hdr_len))) |
				   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
	req->vlan = (__force __be16) vlantag;
	req->len = (__force __be16) len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
				    PASS_OPEN_TOS(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
	return;
}
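/*
 * Send a FW_OFLD_CONNECTION_WR work request asking the firmware to
 * complete the passive open for a SYN that arrived via a filter. The
 * original skb is stashed in the cookie so the reply handler can replay
 * it down the normal cpl_pass_accept_req path.
 */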
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
		      V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
		      V_FW_OFLD_CONNECTION_WR_ASTID(
		      GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));

	/*
	 * We initialize the MSS index in TCB to 0xF, so that when the
	 * driver sends the cpl_pass_accept_rpl, the TCB picks up the
	 * correct value. If this was 0, TP would ignore any value > 0
	 * for the MSS index.
	 */
	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
	req->cookie = (unsigned long)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}
/*
 * Handler for CPL_RX_PKT message. We need to handle cpl_rx_pkt
 * messages when a filter is being used instead of a server to
 * redirect a SYN packet. When packets hit the filter they are
 * redirected to the offload queue and the driver tries to establish
 * the connection using a firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from the filter hit index from
	 * cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
		      G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
		      G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate the filter portion for the LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt
};
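/*
 * Handle an endpoint timeout: depending on the connection state, fail
 * the connect, move the QP to ERROR, and abort the connection.
 */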
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
}
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}
static DECLARE_WORK(skb_work, process_work);
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "Negative advice on abort - tid %u status %d (%s)\n",
			 ep->hwtid, req->status, neg_adv_str(req->status));
		kfree_skb(skb);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, if we are on MPAv2 and want to retry with MPAv1
	 * then, don't wake up yet.
	 */
	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
		if (ep->com.state != MPA_REQ_SENT)
			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	} else
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	sched(dev, skb);
	return 0;
}
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};
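/*
 * Module init/teardown for the CM: all CPL processing funnels through a
 * single-threaded workqueue, so the work handlers never run
 * concurrently with each other.
 */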
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}
void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}