drivers/infiniband/hw/cxgb4/cm.c
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <linux/skbuff.h>
36#include <linux/timer.h>
37#include <linux/notifier.h>
38#include <linux/inetdevice.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
 41#include <linux/if_vlan.h>
42
43#include <net/neighbour.h>
44#include <net/netevent.h>
45#include <net/route.h>
 46#include <net/tcp.h>
47#include <net/ip6_route.h>
48#include <net/addrconf.h>
49
50#include "iw_cxgb4.h"
51
52static char *states[] = {
53 "idle",
54 "listen",
55 "connecting",
56 "mpa_wait_req",
57 "mpa_req_sent",
58 "mpa_req_rcvd",
59 "mpa_rep_sent",
60 "fpdu_mode",
61 "aborting",
62 "closing",
63 "moribund",
64 "dead",
65 NULL,
66};
67
68static int nocong;
69module_param(nocong, int, 0644);
 70MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
71
72static int enable_ecn;
73module_param(enable_ecn, int, 0644);
74MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
75
 76static int dack_mode = 1;
 77module_param(dack_mode, int, 0644);
 78MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 79
80int c4iw_max_read_depth = 8;
81module_param(c4iw_max_read_depth, int, 0644);
82MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
83
84static int enable_tcp_timestamps;
85module_param(enable_tcp_timestamps, int, 0644);
86MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
87
88static int enable_tcp_sack;
89module_param(enable_tcp_sack, int, 0644);
90MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
91
92static int enable_tcp_window_scaling = 1;
93module_param(enable_tcp_window_scaling, int, 0644);
94MODULE_PARM_DESC(enable_tcp_window_scaling,
95 "Enable tcp window scaling (default=1)");
96
97int c4iw_debug;
98module_param(c4iw_debug, int, 0644);
99MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
100
101static int peer2peer;
102module_param(peer2peer, int, 0644);
103MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
104
105static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
106module_param(p2p_type, int, 0644);
107MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
108 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
109
110static int ep_timeout_secs = 60;
111module_param(ep_timeout_secs, int, 0644);
112MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
113 "in seconds (default=60)");
114
115static int mpa_rev = 1;
116module_param(mpa_rev, int, 0644);
117MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
 118 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
 119 " compliant (default=1)");
120
121static int markers_enabled;
122module_param(markers_enabled, int, 0644);
123MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
124
125static int crc_enabled = 1;
126module_param(crc_enabled, int, 0644);
127MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
128
129static int rcv_win = 256 * 1024;
130module_param(rcv_win, int, 0644);
131MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
132
 133static int snd_win = 128 * 1024;
 134module_param(snd_win, int, 0644);
 135MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
 136
 137static struct workqueue_struct *workq;
138
139static struct sk_buff_head rxq;
140
141static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
142static void ep_timeout(unsigned long arg);
143static void connect_reply_upcall(struct c4iw_ep *ep, int status);
144
145static LIST_HEAD(timeout_list);
146static spinlock_t timeout_lock;
147
148static void deref_qp(struct c4iw_ep *ep)
149{
150 c4iw_qp_rem_ref(&ep->com.qp->ibqp);
151 clear_bit(QP_REFERENCED, &ep->com.flags);
152}
153
154static void ref_qp(struct c4iw_ep *ep)
155{
156 set_bit(QP_REFERENCED, &ep->com.flags);
157 c4iw_qp_add_ref(&ep->com.qp->ibqp);
158}
159
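/*
 * Arm the per-endpoint timer that bounds CM operations. A reference on the
 * endpoint is held while the timer is pending; ep_timeout() fires after
 * ep_timeout_secs if the expected state transition never happens.
 */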
160static void start_ep_timer(struct c4iw_ep *ep)
161{
162 PDBG("%s ep %p\n", __func__, ep);
163 if (timer_pending(&ep->timer)) {
164 pr_err("%s timer already started! ep %p\n",
165 __func__, ep);
166 return;
167 }
168 clear_bit(TIMEOUT, &ep->com.flags);
169 c4iw_get_ep(&ep->com);
170 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
171 ep->timer.data = (unsigned long)ep;
172 ep->timer.function = ep_timeout;
173 add_timer(&ep->timer);
174}
175
176static void stop_ep_timer(struct c4iw_ep *ep)
177{
 178 PDBG("%s ep %p stopping\n", __func__, ep);
 179 del_timer_sync(&ep->timer);
180 if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
181 c4iw_put_ep(&ep->com);
182}
183
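/*
 * Transmit an skb via the connection's L2T entry. If the device is in a
 * fatal error state the skb is freed and -EIO is returned instead.
 */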
184static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
185 struct l2t_entry *l2e)
186{
187 int error = 0;
188
189 if (c4iw_fatal_error(rdev)) {
190 kfree_skb(skb);
191 PDBG("%s - device in error state - dropping\n", __func__);
192 return -EIO;
193 }
194 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
195 if (error < 0)
196 kfree_skb(skb);
 197 return error < 0 ? error : 0;
198}
199
200int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
201{
202 int error = 0;
203
204 if (c4iw_fatal_error(rdev)) {
205 kfree_skb(skb);
206 PDBG("%s - device in error state - dropping\n", __func__);
207 return -EIO;
208 }
209 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
210 if (error < 0)
211 kfree_skb(skb);
 212 return error < 0 ? error : 0;
213}
214
215static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
216{
217 struct cpl_tid_release *req;
218
219 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
220 if (!skb)
221 return;
222 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
223 INIT_TP_WR(req, hwtid);
224 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
225 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
226 c4iw_ofld_send(rdev, skb);
227 return;
228}
229
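/*
 * Derive the effective MSS from the negotiated TCP options: start from the
 * MTU table entry, subtract 40 bytes of TCP/IP header (plus 12 more if
 * timestamps are in use), and clamp to a 128-byte minimum.
 */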
230static void set_emss(struct c4iw_ep *ep, u16 opt)
231{
232 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
233 ep->mss = ep->emss;
234 if (GET_TCPOPT_TSTAMP(opt))
235 ep->emss -= 12;
236 if (ep->emss < 128)
237 ep->emss = 128;
238 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
239 ep->mss, ep->emss);
240}
241
242static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
243{
244 enum c4iw_ep_state state;
245
 246 mutex_lock(&epc->mutex);
 247 state = epc->state;
 248 mutex_unlock(&epc->mutex);
249 return state;
250}
251
252static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
253{
254 epc->state = new;
255}
256
257static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
258{
 259 mutex_lock(&epc->mutex);
260 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
261 __state_set(epc, new);
 262 mutex_unlock(&epc->mutex);
263 return;
264}
265
266static void *alloc_ep(int size, gfp_t gfp)
267{
268 struct c4iw_ep_common *epc;
269
270 epc = kzalloc(size, gfp);
271 if (epc) {
272 kref_init(&epc->kref);
 273 mutex_init(&epc->mutex);
 274 c4iw_init_wr_wait(&epc->wr_wait);
275 }
276 PDBG("%s alloc ep %p\n", __func__, epc);
277 return epc;
278}
279
280void _c4iw_free_ep(struct kref *kref)
281{
282 struct c4iw_ep *ep;
283
284 ep = container_of(kref, struct c4iw_ep, com.kref);
285 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
286 if (test_bit(QP_REFERENCED, &ep->com.flags))
287 deref_qp(ep);
 288 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
 289 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
290 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
291 dst_release(ep->dst);
292 cxgb4_l2t_release(ep->l2t);
293 }
294 kfree(ep);
295}
296
297static void release_ep_resources(struct c4iw_ep *ep)
298{
299 set_bit(RELEASE_RESOURCES, &ep->com.flags);
300 c4iw_put_ep(&ep->com);
301}
302
303static int status2errno(int status)
304{
305 switch (status) {
306 case CPL_ERR_NONE:
307 return 0;
308 case CPL_ERR_CONN_RESET:
309 return -ECONNRESET;
310 case CPL_ERR_ARP_MISS:
311 return -EHOSTUNREACH;
312 case CPL_ERR_CONN_TIMEDOUT:
313 return -ETIMEDOUT;
314 case CPL_ERR_TCAM_FULL:
315 return -ENOMEM;
316 case CPL_ERR_CONN_EXIST:
317 return -EADDRINUSE;
318 default:
319 return -EIO;
320 }
321}
322
323/*
324 * Try and reuse skbs already allocated...
325 */
326static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
327{
328 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
329 skb_trim(skb, 0);
330 skb_get(skb);
331 skb_reset_transport_header(skb);
332 } else {
333 skb = alloc_skb(len, gfp);
334 }
 335 t4_set_arp_err_handler(skb, NULL, NULL);
336 return skb;
337}
338
339static struct net_device *get_real_dev(struct net_device *egress_dev)
340{
341 struct net_device *phys_dev = egress_dev;
342 if (egress_dev->priv_flags & IFF_802_1Q_VLAN)
343 phys_dev = vlan_dev_real_dev(egress_dev);
344 return phys_dev;
345}
346
347static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
348{
349 int i;
350
351 egress_dev = get_real_dev(egress_dev);
352 for (i = 0; i < dev->rdev.lldi.nports; i++)
353 if (dev->rdev.lldi.ports[i] == egress_dev)
354 return 1;
355 return 0;
356}
357
358static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
359 __u8 *peer_ip, __be16 local_port,
360 __be16 peer_port, u8 tos,
361 __u32 sin6_scope_id)
362{
363 struct dst_entry *dst = NULL;
364
365 if (IS_ENABLED(CONFIG_IPV6)) {
366 struct flowi6 fl6;
367
368 memset(&fl6, 0, sizeof(fl6));
369 memcpy(&fl6.daddr, peer_ip, 16);
370 memcpy(&fl6.saddr, local_ip, 16);
371 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
372 fl6.flowi6_oif = sin6_scope_id;
373 dst = ip6_route_output(&init_net, NULL, &fl6);
374 if (!dst)
375 goto out;
376 if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
377 !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
378 dst_release(dst);
379 dst = NULL;
380 }
381 }
382
383out:
384 return dst;
385}
386
387static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
388 __be32 peer_ip, __be16 local_port,
389 __be16 peer_port, u8 tos)
390{
391 struct rtable *rt;
 392 struct flowi4 fl4;
 393 struct neighbour *n;
 394
 395 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
396 peer_port, local_port, IPPROTO_TCP,
397 tos, 0);
 398 if (IS_ERR(rt))
 399 return NULL;
400 n = dst_neigh_lookup(&rt->dst, &peer_ip);
401 if (!n)
402 return NULL;
403 if (!our_interface(dev, n->dev)) {
404 dst_release(&rt->dst);
405 return NULL;
406 }
407 neigh_release(n);
408 return &rt->dst;
409}
410
411static void arp_failure_discard(void *handle, struct sk_buff *skb)
412{
413 PDBG("%s c4iw_dev %p\n", __func__, handle);
414 kfree_skb(skb);
415}
416
417/*
418 * Handle an ARP failure for an active open.
419 */
420static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
421{
 422 printk(KERN_ERR MOD "ARP failure during connect\n");
423 kfree_skb(skb);
424}
425
426/*
427 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
428 * and send it along.
429 */
430static void abort_arp_failure(void *handle, struct sk_buff *skb)
431{
432 struct c4iw_rdev *rdev = handle;
433 struct cpl_abort_req *req = cplhdr(skb);
434
435 PDBG("%s rdev %p\n", __func__, rdev);
436 req->cmd = CPL_ABORT_NO_RST;
437 c4iw_ofld_send(rdev, skb);
438}
439
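/*
 * Send a FW_FLOWC_WR to program the per-flow parameters in firmware
 * (PF/VF, channel, port, ingress queue, sequence numbers, send buffer and
 * MSS) before any offloaded TX is done on this tid.
 */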
440static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
441{
442 unsigned int flowclen = 80;
443 struct fw_flowc_wr *flowc;
444 int i;
445
446 skb = get_skb(skb, flowclen, GFP_KERNEL);
447 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
448
449 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
450 FW_FLOWC_WR_NPARAMS(8));
451 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
452 16)) | FW_WR_FLOWID(ep->hwtid));
453
454 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
 455 flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
456 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
457 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
458 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
459 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
460 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
461 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
462 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
463 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
464 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
465 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
466 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
467 flowc->mnemval[6].val = cpu_to_be32(snd_win);
468 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
469 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
470 /* Pad WR to 16 byte boundary */
471 flowc->mnemval[8].mnemonic = 0;
472 flowc->mnemval[8].val = 0;
473 for (i = 0; i < 9; i++) {
474 flowc->mnemval[i].r4[0] = 0;
475 flowc->mnemval[i].r4[1] = 0;
476 flowc->mnemval[i].r4[2] = 0;
477 }
478
479 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
480 c4iw_ofld_send(&ep->com.dev->rdev, skb);
481}
482
483static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
484{
485 struct cpl_close_con_req *req;
486 struct sk_buff *skb;
487 int wrlen = roundup(sizeof *req, 16);
488
489 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
490 skb = get_skb(NULL, wrlen, gfp);
491 if (!skb) {
492 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
493 return -ENOMEM;
494 }
495 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
496 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
497 req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
498 memset(req, 0, wrlen);
499 INIT_TP_WR(req, ep->hwtid);
500 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
501 ep->hwtid));
502 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
503}
504
505static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
506{
507 struct cpl_abort_req *req;
508 int wrlen = roundup(sizeof *req, 16);
509
510 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
511 skb = get_skb(skb, wrlen, gfp);
512 if (!skb) {
513 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
514 __func__);
515 return -ENOMEM;
516 }
517 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
518 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
519 req = (struct cpl_abort_req *) skb_put(skb, wrlen);
520 memset(req, 0, wrlen);
521 INIT_TP_WR(req, ep->hwtid);
522 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
523 req->cmd = CPL_ABORT_SEND_RST;
524 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
525}
526
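/*
 * Build and send the active-open request for this endpoint. The CPL used
 * depends on the adapter type (T4 vs. T5) and the address family (IPv4 vs.
 * IPv6); opt0/opt2 carry the TCP options for the offloaded connection.
 */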
527static int send_connect(struct c4iw_ep *ep)
528{
529 struct cpl_act_open_req *req;
 530 struct cpl_t5_act_open_req *t5_req;
531 struct cpl_act_open_req6 *req6;
532 struct cpl_t5_act_open_req6 *t5_req6;
533 struct sk_buff *skb;
534 u64 opt0;
535 u32 opt2;
536 unsigned int mtu_idx;
537 int wscale;
538 int wrlen;
539 int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
540 sizeof(struct cpl_act_open_req) :
541 sizeof(struct cpl_t5_act_open_req);
542 int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
543 sizeof(struct cpl_act_open_req6) :
544 sizeof(struct cpl_t5_act_open_req6);
545 struct sockaddr_in *la = (struct sockaddr_in *)&ep->com.local_addr;
546 struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr;
547 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
548 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
549
550 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
551 roundup(sizev4, 16) :
552 roundup(sizev6, 16);
553
554 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
555
556 skb = get_skb(NULL, wrlen, GFP_KERNEL);
557 if (!skb) {
558 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
559 __func__);
560 return -ENOMEM;
561 }
 562 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
563
564 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
565 wscale = compute_wscale(rcv_win);
566 opt0 = (nocong ? NO_CONG(1) : 0) |
567 KEEP_ALIVE(1) |
 568 DELACK(1) |
569 WND_SCALE(wscale) |
570 MSS_IDX(mtu_idx) |
571 L2T_IDX(ep->l2t->idx) |
572 TX_CHAN(ep->tx_chan) |
573 SMAC_SEL(ep->smac_idx) |
574 DSCP(ep->tos) |
 575 ULP_MODE(ULP_MODE_TCPDDP) |
576 RCV_BUFSIZ(rcv_win>>10);
577 opt2 = RX_CHANNEL(0) |
 578 CCTRL_ECN(enable_ecn) |
579 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
580 if (enable_tcp_timestamps)
581 opt2 |= TSTAMPS_EN(1);
582 if (enable_tcp_sack)
583 opt2 |= SACK_EN(1);
584 if (wscale && enable_tcp_window_scaling)
585 opt2 |= WND_SCALE_EN(1);
586 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
587
 588 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
589 if (ep->com.remote_addr.ss_family == AF_INET) {
590 req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
591 INIT_TP_WR(req, 0);
592 OPCODE_TID(req) = cpu_to_be32(
593 MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
594 ((ep->rss_qid << 14) | ep->atid)));
595 req->local_port = la->sin_port;
596 req->peer_port = ra->sin_port;
597 req->local_ip = la->sin_addr.s_addr;
598 req->peer_ip = ra->sin_addr.s_addr;
599 req->opt0 = cpu_to_be64(opt0);
600 req->params = cpu_to_be32(cxgb4_select_ntuple(
601 ep->com.dev->rdev.lldi.ports[0],
602 ep->l2t));
603 req->opt2 = cpu_to_be32(opt2);
604 } else {
605 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
606
607 INIT_TP_WR(req6, 0);
608 OPCODE_TID(req6) = cpu_to_be32(
609 MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
610 ((ep->rss_qid<<14)|ep->atid)));
611 req6->local_port = la6->sin6_port;
612 req6->peer_port = ra6->sin6_port;
613 req6->local_ip_hi = *((__be64 *)
614 (la6->sin6_addr.s6_addr));
615 req6->local_ip_lo = *((__be64 *)
616 (la6->sin6_addr.s6_addr + 8));
617 req6->peer_ip_hi = *((__be64 *)
618 (ra6->sin6_addr.s6_addr));
619 req6->peer_ip_lo = *((__be64 *)
620 (ra6->sin6_addr.s6_addr + 8));
621 req6->opt0 = cpu_to_be64(opt0);
622 req6->params = cpu_to_be32(cxgb4_select_ntuple(
623 ep->com.dev->rdev.lldi.ports[0],
624 ep->l2t));
625 req6->opt2 = cpu_to_be32(opt2);
626 }
 627 } else {
628 if (ep->com.remote_addr.ss_family == AF_INET) {
629 t5_req = (struct cpl_t5_act_open_req *)
630 skb_put(skb, wrlen);
631 INIT_TP_WR(t5_req, 0);
632 OPCODE_TID(t5_req) = cpu_to_be32(
633 MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
634 ((ep->rss_qid << 14) | ep->atid)));
635 t5_req->local_port = la->sin_port;
636 t5_req->peer_port = ra->sin_port;
637 t5_req->local_ip = la->sin_addr.s_addr;
638 t5_req->peer_ip = ra->sin_addr.s_addr;
639 t5_req->opt0 = cpu_to_be64(opt0);
640 t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
641 cxgb4_select_ntuple(
642 ep->com.dev->rdev.lldi.ports[0],
643 ep->l2t)));
644 t5_req->opt2 = cpu_to_be32(opt2);
645 } else {
646 t5_req6 = (struct cpl_t5_act_open_req6 *)
647 skb_put(skb, wrlen);
648 INIT_TP_WR(t5_req6, 0);
649 OPCODE_TID(t5_req6) = cpu_to_be32(
650 MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
651 ((ep->rss_qid<<14)|ep->atid)));
652 t5_req6->local_port = la6->sin6_port;
653 t5_req6->peer_port = ra6->sin6_port;
654 t5_req6->local_ip_hi = *((__be64 *)
655 (la6->sin6_addr.s6_addr));
656 t5_req6->local_ip_lo = *((__be64 *)
657 (la6->sin6_addr.s6_addr + 8));
658 t5_req6->peer_ip_hi = *((__be64 *)
659 (ra6->sin6_addr.s6_addr));
660 t5_req6->peer_ip_lo = *((__be64 *)
661 (ra6->sin6_addr.s6_addr + 8));
662 t5_req6->opt0 = cpu_to_be64(opt0);
663 t5_req6->params = (__force __be64)cpu_to_be32(
664 cxgb4_select_ntuple(
665 ep->com.dev->rdev.lldi.ports[0],
666 ep->l2t));
667 t5_req6->opt2 = cpu_to_be32(opt2);
668 }
669 }
670
 671 set_bit(ACT_OPEN_REQ, &ep->com.history);
672 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
673}
674
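/*
 * Send the MPA start request as offloaded TX data. For MPA revision 2 the
 * private data is prefixed with the enhanced-RDMA connection parameters
 * (IRD/ORD and the peer2peer RTR type).
 */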
675static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
676 u8 mpa_rev_to_use)
677{
678 int mpalen, wrlen;
679 struct fw_ofld_tx_data_wr *req;
680 struct mpa_message *mpa;
 681 struct mpa_v2_conn_params mpa_v2_params;
682
683 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
684
685 BUG_ON(skb_cloned(skb));
686
687 mpalen = sizeof(*mpa) + ep->plen;
688 if (mpa_rev_to_use == 2)
689 mpalen += sizeof(struct mpa_v2_conn_params);
690 wrlen = roundup(mpalen + sizeof *req, 16);
691 skb = get_skb(skb, wrlen, GFP_KERNEL);
692 if (!skb) {
693 connect_reply_upcall(ep, -ENOMEM);
694 return;
695 }
696 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
697
698 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
699 memset(req, 0, wrlen);
700 req->op_to_immdlen = cpu_to_be32(
701 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
702 FW_WR_COMPL(1) |
703 FW_WR_IMMDLEN(mpalen));
704 req->flowid_len16 = cpu_to_be32(
705 FW_WR_FLOWID(ep->hwtid) |
706 FW_WR_LEN16(wrlen >> 4));
707 req->plen = cpu_to_be32(mpalen);
708 req->tunnel_to_proxy = cpu_to_be32(
709 FW_OFLD_TX_DATA_WR_FLUSH(1) |
710 FW_OFLD_TX_DATA_WR_SHOVE(1));
711
712 mpa = (struct mpa_message *)(req + 1);
713 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
714 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
715 (markers_enabled ? MPA_MARKERS : 0) |
716 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
 717 mpa->private_data_size = htons(ep->plen);
 718 mpa->revision = mpa_rev_to_use;
 719 if (mpa_rev_to_use == 1) {
 720 ep->tried_with_mpa_v1 = 1;
721 ep->retry_with_mpa_v1 = 0;
722 }
723
724 if (mpa_rev_to_use == 2) {
725 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
726 sizeof (struct mpa_v2_conn_params));
727 mpa_v2_params.ird = htons((u16)ep->ird);
728 mpa_v2_params.ord = htons((u16)ep->ord);
729
730 if (peer2peer) {
731 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
732 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
733 mpa_v2_params.ord |=
734 htons(MPA_V2_RDMA_WRITE_RTR);
735 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
736 mpa_v2_params.ord |=
737 htons(MPA_V2_RDMA_READ_RTR);
738 }
739 memcpy(mpa->private_data, &mpa_v2_params,
740 sizeof(struct mpa_v2_conn_params));
 741
742 if (ep->plen)
743 memcpy(mpa->private_data +
744 sizeof(struct mpa_v2_conn_params),
745 ep->mpa_pkt + sizeof(*mpa), ep->plen);
746 } else
747 if (ep->plen)
748 memcpy(mpa->private_data,
749 ep->mpa_pkt + sizeof(*mpa), ep->plen);
750
751 /*
752 * Reference the mpa skb. This ensures the data area
753 * will remain in memory until the hw acks the tx.
754 * Function fw4_ack() will deref it.
755 */
756 skb_get(skb);
757 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
758 BUG_ON(ep->mpa_skb);
759 ep->mpa_skb = skb;
760 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
761 start_ep_timer(ep);
762 state_set(&ep->com, MPA_REQ_SENT);
763 ep->mpa_attr.initiator = 1;
764 return;
765}
766
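/*
 * Send an MPA start reply with the MPA_REJECT flag set, carrying any
 * consumer private data supplied with the reject.
 */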
767static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
768{
769 int mpalen, wrlen;
770 struct fw_ofld_tx_data_wr *req;
771 struct mpa_message *mpa;
772 struct sk_buff *skb;
 773 struct mpa_v2_conn_params mpa_v2_params;
774
775 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
776
777 mpalen = sizeof(*mpa) + plen;
778 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
779 mpalen += sizeof(struct mpa_v2_conn_params);
780 wrlen = roundup(mpalen + sizeof *req, 16);
781
782 skb = get_skb(NULL, wrlen, GFP_KERNEL);
783 if (!skb) {
784 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
785 return -ENOMEM;
786 }
787 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
788
789 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
790 memset(req, 0, wrlen);
791 req->op_to_immdlen = cpu_to_be32(
792 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
793 FW_WR_COMPL(1) |
794 FW_WR_IMMDLEN(mpalen));
795 req->flowid_len16 = cpu_to_be32(
796 FW_WR_FLOWID(ep->hwtid) |
797 FW_WR_LEN16(wrlen >> 4));
798 req->plen = cpu_to_be32(mpalen);
799 req->tunnel_to_proxy = cpu_to_be32(
800 FW_OFLD_TX_DATA_WR_FLUSH(1) |
801 FW_OFLD_TX_DATA_WR_SHOVE(1));
802
803 mpa = (struct mpa_message *)(req + 1);
804 memset(mpa, 0, sizeof(*mpa));
805 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
806 mpa->flags = MPA_REJECT;
 807 mpa->revision = ep->mpa_attr.version;
 808 mpa->private_data_size = htons(plen);
809
810 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
811 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
812 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
813 sizeof (struct mpa_v2_conn_params));
814 mpa_v2_params.ird = htons(((u16)ep->ird) |
815 (peer2peer ? MPA_V2_PEER2PEER_MODEL :
816 0));
817 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
818 (p2p_type ==
819 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
820 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
821 FW_RI_INIT_P2PTYPE_READ_REQ ?
822 MPA_V2_RDMA_READ_RTR : 0) : 0));
823 memcpy(mpa->private_data, &mpa_v2_params,
824 sizeof(struct mpa_v2_conn_params));
825
826 if (ep->plen)
827 memcpy(mpa->private_data +
828 sizeof(struct mpa_v2_conn_params), pdata, plen);
829 } else
830 if (plen)
831 memcpy(mpa->private_data, pdata, plen);
832
833 /*
834 * Reference the mpa skb again. This ensures the data area
835 * will remain in memory until the hw acks the tx.
836 * Function fw4_ack() will deref it.
837 */
838 skb_get(skb);
839 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
840 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
841 BUG_ON(ep->mpa_skb);
842 ep->mpa_skb = skb;
843 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
844}
845
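/*
 * Send the accepting MPA start reply, echoing the negotiated revision and
 * (for MPA v2) the final IRD/ORD and RTR settings.
 */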
846static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
847{
848 int mpalen, wrlen;
849 struct fw_ofld_tx_data_wr *req;
850 struct mpa_message *mpa;
851 struct sk_buff *skb;
 852 struct mpa_v2_conn_params mpa_v2_params;
853
854 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
855
856 mpalen = sizeof(*mpa) + plen;
857 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
858 mpalen += sizeof(struct mpa_v2_conn_params);
859 wrlen = roundup(mpalen + sizeof *req, 16);
860
861 skb = get_skb(NULL, wrlen, GFP_KERNEL);
862 if (!skb) {
863 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
864 return -ENOMEM;
865 }
866 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
867
868 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
869 memset(req, 0, wrlen);
870 req->op_to_immdlen = cpu_to_be32(
871 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
872 FW_WR_COMPL(1) |
873 FW_WR_IMMDLEN(mpalen));
874 req->flowid_len16 = cpu_to_be32(
875 FW_WR_FLOWID(ep->hwtid) |
876 FW_WR_LEN16(wrlen >> 4));
877 req->plen = cpu_to_be32(mpalen);
878 req->tunnel_to_proxy = cpu_to_be32(
879 FW_OFLD_TX_DATA_WR_FLUSH(1) |
880 FW_OFLD_TX_DATA_WR_SHOVE(1));
881
882 mpa = (struct mpa_message *)(req + 1);
883 memset(mpa, 0, sizeof(*mpa));
884 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
885 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
886 (markers_enabled ? MPA_MARKERS : 0);
 887 mpa->revision = ep->mpa_attr.version;
 888 mpa->private_data_size = htons(plen);
889
890 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
891 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
892 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
893 sizeof (struct mpa_v2_conn_params));
894 mpa_v2_params.ird = htons((u16)ep->ird);
895 mpa_v2_params.ord = htons((u16)ep->ord);
896 if (peer2peer && (ep->mpa_attr.p2p_type !=
897 FW_RI_INIT_P2PTYPE_DISABLED)) {
898 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
899
900 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
901 mpa_v2_params.ord |=
902 htons(MPA_V2_RDMA_WRITE_RTR);
903 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
904 mpa_v2_params.ord |=
905 htons(MPA_V2_RDMA_READ_RTR);
906 }
907
908 memcpy(mpa->private_data, &mpa_v2_params,
909 sizeof(struct mpa_v2_conn_params));
910
911 if (ep->plen)
912 memcpy(mpa->private_data +
913 sizeof(struct mpa_v2_conn_params), pdata, plen);
914 } else
915 if (plen)
916 memcpy(mpa->private_data, pdata, plen);
917
918 /*
919 * Reference the mpa skb. This ensures the data area
920 * will remain in memory until the hw acks the tx.
921 * Function fw4_ack() will deref it.
922 */
923 skb_get(skb);
924 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
925 ep->mpa_skb = skb;
926 state_set(&ep->com, MPA_REP_SENT);
927 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
928}
929
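/*
 * CPL_ACT_ESTABLISH handler: the active TCP connection is up. Move the
 * endpoint from its atid to the hardware tid, record the initial sequence
 * numbers, then kick off MPA negotiation with a flowc and an MPA request.
 */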
930static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
931{
932 struct c4iw_ep *ep;
933 struct cpl_act_establish *req = cplhdr(skb);
934 unsigned int tid = GET_TID(req);
935 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
936 struct tid_info *t = dev->rdev.lldi.tids;
937
938 ep = lookup_atid(t, atid);
939
940 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
941 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
942
943 dst_confirm(ep->dst);
944
945 /* setup the hwtid for this connection */
946 ep->hwtid = tid;
947 cxgb4_insert_tid(t, ep, tid);
 948 insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
949
950 ep->snd_seq = be32_to_cpu(req->snd_isn);
951 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
952
953 set_emss(ep, ntohs(req->tcp_opt));
954
955 /* dealloc the atid */
 956 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
 957 cxgb4_free_atid(t, atid);
 958 set_bit(ACT_ESTAB, &ep->com.history);
959
960 /* start MPA negotiation */
961 send_flowc(ep, NULL);
962 if (ep->retry_with_mpa_v1)
963 send_mpa_req(ep, skb, 1);
964 else
965 send_mpa_req(ep, skb, mpa_rev);
966
967 return 0;
968}
969
970static void close_complete_upcall(struct c4iw_ep *ep)
971{
972 struct iw_cm_event event;
973
974 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
975 memset(&event, 0, sizeof(event));
976 event.event = IW_CM_EVENT_CLOSE;
977 if (ep->com.cm_id) {
978 PDBG("close complete delivered ep %p cm_id %p tid %u\n",
979 ep, ep->com.cm_id, ep->hwtid);
980 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
981 ep->com.cm_id->rem_ref(ep->com.cm_id);
982 ep->com.cm_id = NULL;
 983 set_bit(CLOSE_UPCALL, &ep->com.history);
984 }
985}
986
987static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
988{
989 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
990 close_complete_upcall(ep);
991 state_set(&ep->com, ABORTING);
 992 set_bit(ABORT_CONN, &ep->com.history);
993 return send_abort(ep, skb, gfp);
994}
995
996static void peer_close_upcall(struct c4iw_ep *ep)
997{
998 struct iw_cm_event event;
999
1000 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1001 memset(&event, 0, sizeof(event));
1002 event.event = IW_CM_EVENT_DISCONNECT;
1003 if (ep->com.cm_id) {
1004 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
1005 ep, ep->com.cm_id, ep->hwtid);
1006 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 1007 set_bit(DISCONN_UPCALL, &ep->com.history);
1008 }
1009}
1010
1011static void peer_abort_upcall(struct c4iw_ep *ep)
1012{
1013 struct iw_cm_event event;
1014
1015 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1016 memset(&event, 0, sizeof(event));
1017 event.event = IW_CM_EVENT_CLOSE;
1018 event.status = -ECONNRESET;
1019 if (ep->com.cm_id) {
1020 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
1021 ep->com.cm_id, ep->hwtid);
1022 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1023 ep->com.cm_id->rem_ref(ep->com.cm_id);
1024 ep->com.cm_id = NULL;
 1025 set_bit(ABORT_UPCALL, &ep->com.history);
1026 }
1027}
1028
1029static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1030{
1031 struct iw_cm_event event;
1032
1033 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
1034 memset(&event, 0, sizeof(event));
1035 event.event = IW_CM_EVENT_CONNECT_REPLY;
1036 event.status = status;
1037 memcpy(&event.local_addr, &ep->com.local_addr,
1038 sizeof(ep->com.local_addr));
1039 memcpy(&event.remote_addr, &ep->com.remote_addr,
1040 sizeof(ep->com.remote_addr));
1041
1042 if ((status == 0) || (status == -ECONNREFUSED)) {
1043 if (!ep->tried_with_mpa_v1) {
1044 /* this means MPA_v2 is used */
1045 event.private_data_len = ep->plen -
1046 sizeof(struct mpa_v2_conn_params);
1047 event.private_data = ep->mpa_pkt +
1048 sizeof(struct mpa_message) +
1049 sizeof(struct mpa_v2_conn_params);
1050 } else {
1051 /* this means MPA_v1 is used */
1052 event.private_data_len = ep->plen;
1053 event.private_data = ep->mpa_pkt +
1054 sizeof(struct mpa_message);
1055 }
 1056 }
1057
1058 PDBG("%s ep %p tid %u status %d\n", __func__, ep,
1059 ep->hwtid, status);
 1060 set_bit(CONN_RPL_UPCALL, &ep->com.history);
1061 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1062
1063 if (status < 0) {
1064 ep->com.cm_id->rem_ref(ep->com.cm_id);
1065 ep->com.cm_id = NULL;
1066 }
1067}
1068
1069static void connect_request_upcall(struct c4iw_ep *ep)
1070{
1071 struct iw_cm_event event;
1072
1073 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1074 memset(&event, 0, sizeof(event));
1075 event.event = IW_CM_EVENT_CONNECT_REQUEST;
1076 memcpy(&event.local_addr, &ep->com.local_addr,
1077 sizeof(ep->com.local_addr));
1078 memcpy(&event.remote_addr, &ep->com.remote_addr,
1079 sizeof(ep->com.remote_addr));
 1080 event.provider_data = ep;
1081 if (!ep->tried_with_mpa_v1) {
1082 /* this means MPA_v2 is used */
1083 event.ord = ep->ord;
1084 event.ird = ep->ird;
1085 event.private_data_len = ep->plen -
1086 sizeof(struct mpa_v2_conn_params);
1087 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1088 sizeof(struct mpa_v2_conn_params);
1089 } else {
1090 /* this means MPA_v1 is used. Send max supported */
1091 event.ord = c4iw_max_read_depth;
1092 event.ird = c4iw_max_read_depth;
1093 event.private_data_len = ep->plen;
1094 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1095 }
1096 if (state_read(&ep->parent_ep->com) != DEAD) {
1097 c4iw_get_ep(&ep->com);
1098 ep->parent_ep->com.cm_id->event_handler(
1099 ep->parent_ep->com.cm_id,
1100 &event);
1101 }
 1102 set_bit(CONNREQ_UPCALL, &ep->com.history);
1103 c4iw_put_ep(&ep->parent_ep->com);
1104 ep->parent_ep = NULL;
1105}
1106
1107static void established_upcall(struct c4iw_ep *ep)
1108{
1109 struct iw_cm_event event;
1110
1111 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1112 memset(&event, 0, sizeof(event));
1113 event.event = IW_CM_EVENT_ESTABLISHED;
1114 event.ird = ep->ird;
1115 event.ord = ep->ord;
1116 if (ep->com.cm_id) {
1117 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1118 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 1119 set_bit(ESTAB_UPCALL, &ep->com.history);
1120 }
1121}
1122
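/*
 * Return RX credits to the hardware for data consumed in streaming mode so
 * the peer's TCP window reopens; this also (re)programs the delayed-ack
 * mode via RX_DACK_MODE.
 */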
1123static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1124{
1125 struct cpl_rx_data_ack *req;
1126 struct sk_buff *skb;
1127 int wrlen = roundup(sizeof *req, 16);
1128
1129 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1130 skb = get_skb(NULL, wrlen, GFP_KERNEL);
1131 if (!skb) {
1132 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
1133 return 0;
1134 }
1135
1136 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
1137 memset(req, 0, wrlen);
1138 INIT_TP_WR(req, ep->hwtid);
1139 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
1140 ep->hwtid));
1141 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
1142 F_RX_DACK_CHANGE |
1143 V_RX_DACK_MODE(dack_mode));
 1144 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
1145 c4iw_ofld_send(&ep->com.dev->rdev, skb);
1146 return credits;
1147}
1148
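/*
 * Parse the peer's MPA start reply on the active side. Validates the MPA
 * header and private data length, negotiates the v1/v2 attributes, and
 * moves the QP to RTS; an RTR mismatch or insufficient IRD results in a
 * TERM message after the QP reaches RTS.
 */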
1149static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1150{
1151 struct mpa_message *mpa;
 1152 struct mpa_v2_conn_params *mpa_v2_params;
 1153 u16 plen;
1154 u16 resp_ird, resp_ord;
1155 u8 rtr_mismatch = 0, insuff_ird = 0;
1156 struct c4iw_qp_attributes attrs;
1157 enum c4iw_qp_attr_mask mask;
1158 int err;
1159
1160 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1161
1162 /*
1163 * Stop mpa timer. If it expired, then the state has
1164 * changed and we bail since ep_timeout already aborted
1165 * the connection.
1166 */
1167 stop_ep_timer(ep);
1168 if (state_read(&ep->com) != MPA_REQ_SENT)
1169 return;
1170
1171 /*
1172 * If we get more than the supported amount of private data
1173 * then we must fail this connection.
1174 */
1175 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1176 err = -EINVAL;
1177 goto err;
1178 }
1179
1180 /*
1181 * copy the new data into our accumulation buffer.
1182 */
1183 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1184 skb->len);
1185 ep->mpa_pkt_len += skb->len;
1186
1187 /*
1188 * if we don't even have the mpa message, then bail.
1189 */
1190 if (ep->mpa_pkt_len < sizeof(*mpa))
1191 return;
1192 mpa = (struct mpa_message *) ep->mpa_pkt;
1193
1194 /* Validate MPA header. */
1195 if (mpa->revision > mpa_rev) {
1196 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1197 " Received = %d\n", __func__, mpa_rev, mpa->revision);
1198 err = -EPROTO;
1199 goto err;
1200 }
1201 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1202 err = -EPROTO;
1203 goto err;
1204 }
1205
1206 plen = ntohs(mpa->private_data_size);
1207
1208 /*
1209 * Fail if there's too much private data.
1210 */
1211 if (plen > MPA_MAX_PRIVATE_DATA) {
1212 err = -EPROTO;
1213 goto err;
1214 }
1215
1216 /*
1217 * If plen does not account for pkt size
1218 */
1219 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1220 err = -EPROTO;
1221 goto err;
1222 }
1223
1224 ep->plen = (u8) plen;
1225
1226 /*
1227 * If we don't have all the pdata yet, then bail.
1228 * We'll continue process when more data arrives.
1229 */
1230 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1231 return;
1232
1233 if (mpa->flags & MPA_REJECT) {
1234 err = -ECONNREFUSED;
1235 goto err;
1236 }
1237
1238 /*
1239 * If we get here we have accumulated the entire mpa
1240 * start reply message including private data. And
1241 * the MPA header is valid.
1242 */
1243 state_set(&ep->com, FPDU_MODE);
1244 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1245 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1246 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1247 ep->mpa_attr.version = mpa->revision;
1248 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1249
1250 if (mpa->revision == 2) {
1251 ep->mpa_attr.enhanced_rdma_conn =
1252 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1253 if (ep->mpa_attr.enhanced_rdma_conn) {
1254 mpa_v2_params = (struct mpa_v2_conn_params *)
1255 (ep->mpa_pkt + sizeof(*mpa));
1256 resp_ird = ntohs(mpa_v2_params->ird) &
1257 MPA_V2_IRD_ORD_MASK;
1258 resp_ord = ntohs(mpa_v2_params->ord) &
1259 MPA_V2_IRD_ORD_MASK;
1260
1261 /*
1262 * This is a double-check. Ideally, below checks are
1263 * not required since ird/ord stuff has been taken
1264 * care of in c4iw_accept_cr
1265 */
1266 if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
1267 err = -ENOMEM;
1268 ep->ird = resp_ord;
1269 ep->ord = resp_ird;
1270 insuff_ird = 1;
1271 }
1272
1273 if (ntohs(mpa_v2_params->ird) &
1274 MPA_V2_PEER2PEER_MODEL) {
1275 if (ntohs(mpa_v2_params->ord) &
1276 MPA_V2_RDMA_WRITE_RTR)
1277 ep->mpa_attr.p2p_type =
1278 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1279 else if (ntohs(mpa_v2_params->ord) &
1280 MPA_V2_RDMA_READ_RTR)
1281 ep->mpa_attr.p2p_type =
1282 FW_RI_INIT_P2PTYPE_READ_REQ;
1283 }
1284 }
1285 } else if (mpa->revision == 1)
1286 if (peer2peer)
1287 ep->mpa_attr.p2p_type = p2p_type;
1288
 1289 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1290 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
1291 "%d\n", __func__, ep->mpa_attr.crc_enabled,
1292 ep->mpa_attr.recv_marker_enabled,
1293 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1294 ep->mpa_attr.p2p_type, p2p_type);
1295
1296 /*
1297 * If responder's RTR does not match with that of initiator, assign
1298 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
1299 * generated when moving QP to RTS state.
1300 * A TERM message will be sent after QP has moved to RTS state
1301 */
 1302 if ((ep->mpa_attr.version == 2) && peer2peer &&
1303 (ep->mpa_attr.p2p_type != p2p_type)) {
1304 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1305 rtr_mismatch = 1;
1306 }
1307
1308 attrs.mpa_attr = ep->mpa_attr;
1309 attrs.max_ird = ep->ird;
1310 attrs.max_ord = ep->ord;
1311 attrs.llp_stream_handle = ep;
1312 attrs.next_state = C4IW_QP_STATE_RTS;
1313
1314 mask = C4IW_QP_ATTR_NEXT_STATE |
1315 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
1316 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
1317
1318 /* bind QP and TID with INIT_WR */
1319 err = c4iw_modify_qp(ep->com.qp->rhp,
1320 ep->com.qp, mask, &attrs, 1);
1321 if (err)
1322 goto err;
1323
1324 /*
1325 * If responder's RTR requirement did not match with what initiator
1326 * supports, generate TERM message
1327 */
1328 if (rtr_mismatch) {
1329 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
1330 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1331 attrs.ecode = MPA_NOMATCH_RTR;
1332 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1333 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1334 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1335 err = -ENOMEM;
1336 goto out;
1337 }
1338
1339 /*
1340 * Generate TERM if initiator IRD is not sufficient for responder
1341 * provided ORD. Currently, we do the same behaviour even when
1342 * responder provided IRD is also not sufficient as regards to
1343 * initiator ORD.
1344 */
1345 if (insuff_ird) {
1346 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
1347 __func__);
1348 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1349 attrs.ecode = MPA_INSUFF_IRD;
1350 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1351 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1352 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1353 err = -ENOMEM;
1354 goto out;
1355 }
1356 goto out;
1357err:
1358 state_set(&ep->com, ABORTING);
1359 send_abort(ep, skb, GFP_KERNEL);
1360out:
1361 connect_reply_upcall(ep, err);
1362 return;
1363}
1364
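/*
 * Parse the peer's MPA start request on the passive side. Once the full
 * request (including private data) has been accumulated and validated,
 * record the negotiated attributes and deliver the connect request upcall.
 */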
1365static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1366{
1367 struct mpa_message *mpa;
 1368 struct mpa_v2_conn_params *mpa_v2_params;
1369 u16 plen;
1370
1371 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1372
1373 if (state_read(&ep->com) != MPA_REQ_WAIT)
1374 return;
1375
1376 /*
1377 * If we get more than the supported amount of private data
1378 * then we must fail this connection.
1379 */
1380 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1381 stop_ep_timer(ep);
1382 abort_connection(ep, skb, GFP_KERNEL);
1383 return;
1384 }
1385
1386 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1387
1388 /*
1389 * Copy the new data into our accumulation buffer.
1390 */
1391 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1392 skb->len);
1393 ep->mpa_pkt_len += skb->len;
1394
1395 /*
1396 * If we don't even have the mpa message, then bail.
1397 * We'll continue process when more data arrives.
1398 */
1399 if (ep->mpa_pkt_len < sizeof(*mpa))
1400 return;
1401
1402 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1403 stop_ep_timer(ep);
1404 mpa = (struct mpa_message *) ep->mpa_pkt;
1405
1406 /*
1407 * Validate MPA Header.
1408 */
1409 if (mpa->revision > mpa_rev) {
1410 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1411 " Received = %d\n", __func__, mpa_rev, mpa->revision);
 1412 stop_ep_timer(ep);
1413 abort_connection(ep, skb, GFP_KERNEL);
1414 return;
1415 }
1416
1417 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
 1418 stop_ep_timer(ep);
1419 abort_connection(ep, skb, GFP_KERNEL);
1420 return;
1421 }
1422
1423 plen = ntohs(mpa->private_data_size);
1424
1425 /*
1426 * Fail if there's too much private data.
1427 */
1428 if (plen > MPA_MAX_PRIVATE_DATA) {
 1429 stop_ep_timer(ep);
1430 abort_connection(ep, skb, GFP_KERNEL);
1431 return;
1432 }
1433
1434 /*
1435 * If plen does not account for pkt size
1436 */
1437 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
 1438 stop_ep_timer(ep);
1439 abort_connection(ep, skb, GFP_KERNEL);
1440 return;
1441 }
1442 ep->plen = (u8) plen;
1443
1444 /*
1445 * If we don't have all the pdata yet, then bail.
1446 */
1447 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1448 return;
1449
1450 /*
1451 * If we get here we have accumulated the entire mpa
 1452 * start request message including private data.
1453 */
1454 ep->mpa_attr.initiator = 0;
1455 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1456 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1457 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1458 ep->mpa_attr.version = mpa->revision;
1459 if (mpa->revision == 1)
1460 ep->tried_with_mpa_v1 = 1;
1461 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1462
1463 if (mpa->revision == 2) {
1464 ep->mpa_attr.enhanced_rdma_conn =
1465 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1466 if (ep->mpa_attr.enhanced_rdma_conn) {
1467 mpa_v2_params = (struct mpa_v2_conn_params *)
1468 (ep->mpa_pkt + sizeof(*mpa));
1469 ep->ird = ntohs(mpa_v2_params->ird) &
1470 MPA_V2_IRD_ORD_MASK;
1471 ep->ord = ntohs(mpa_v2_params->ord) &
1472 MPA_V2_IRD_ORD_MASK;
1473 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
1474 if (peer2peer) {
1475 if (ntohs(mpa_v2_params->ord) &
1476 MPA_V2_RDMA_WRITE_RTR)
1477 ep->mpa_attr.p2p_type =
1478 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1479 else if (ntohs(mpa_v2_params->ord) &
1480 MPA_V2_RDMA_READ_RTR)
1481 ep->mpa_attr.p2p_type =
1482 FW_RI_INIT_P2PTYPE_READ_REQ;
1483 }
1484 }
1485 } else if (mpa->revision == 1)
1486 if (peer2peer)
1487 ep->mpa_attr.p2p_type = p2p_type;
1488
1489 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1490 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1491 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1492 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1493 ep->mpa_attr.p2p_type);
1494
1495 state_set(&ep->com, MPA_REQ_RCVD);
1496
1497 /* drive upcall */
1498 connect_request_upcall(ep);
1499 return;
1500}
1501
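/*
 * CPL_RX_DATA handler: streaming-mode data is only expected while MPA
 * negotiation is in progress. In FPDU_MODE such data is unexpected and
 * the QP is moved to TERMINATE.
 */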
1502static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1503{
1504 struct c4iw_ep *ep;
1505 struct cpl_rx_data *hdr = cplhdr(skb);
1506 unsigned int dlen = ntohs(hdr->len);
1507 unsigned int tid = GET_TID(hdr);
1508 struct tid_info *t = dev->rdev.lldi.tids;
 1509 __u8 status = hdr->status;
1510
1511 ep = lookup_tid(t, tid);
1512 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1513 skb_pull(skb, sizeof(*hdr));
1514 skb_trim(skb, dlen);
1515
1516 /* update RX credits */
1517 update_rx_credits(ep, dlen);
1518
1519 switch (state_read(&ep->com)) {
1520 case MPA_REQ_SENT:
 1521 ep->rcv_seq += dlen;
1522 process_mpa_reply(ep, skb);
1523 break;
1524 case MPA_REQ_WAIT:
 1525 ep->rcv_seq += dlen;
1526 process_mpa_request(ep, skb);
1527 break;
1528 case FPDU_MODE: {
1529 struct c4iw_qp_attributes attrs;
1530 BUG_ON(!ep->com.qp);
 1531 if (status)
 1532 pr_err("%s Unexpected streaming data." \
1533 " qpid %u ep %p state %d tid %u status %d\n",
1534 __func__, ep->com.qp->wq.sq.qid, ep,
1535 state_read(&ep->com), ep->hwtid, status);
 1536 attrs.next_state = C4IW_QP_STATE_TERMINATE;
 1537 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 1538 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1539 break;
1540 }
1541 default:
1542 break;
1543 }
1544 return 0;
1545}
1546
1547static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1548{
1549 struct c4iw_ep *ep;
1550 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1551 int release = 0;
1552 unsigned int tid = GET_TID(rpl);
1553 struct tid_info *t = dev->rdev.lldi.tids;
1554
1555 ep = lookup_tid(t, tid);
4984037b
VP
1556 if (!ep) {
1557 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
1558 return 0;
1559 }
 1560 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 1561 mutex_lock(&ep->com.mutex);
1562 switch (ep->com.state) {
1563 case ABORTING:
 1564 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1565 __state_set(&ep->com, DEAD);
1566 release = 1;
1567 break;
1568 default:
1569 printk(KERN_ERR "%s ep %p state %d\n",
1570 __func__, ep, ep->com.state);
1571 break;
1572 }
 1573 mutex_unlock(&ep->com.mutex);
1574
1575 if (release)
1576 release_ep_resources(ep);
1577 return 0;
1578}
1579
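/*
 * Kick off an active open through a FW_OFLD_CONNECTION_WR work request
 * instead of a CPL_ACT_OPEN_REQ, handing the 4-tuple, filter tuple and TCP
 * options to the firmware so it can set up the offloaded connection.
 */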
1580static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1581{
1582 struct sk_buff *skb;
1583 struct fw_ofld_connection_wr *req;
1584 unsigned int mtu_idx;
1585 int wscale;
 1586 struct sockaddr_in *sin;
1587
1588 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1589 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
1590 memset(req, 0, sizeof(*req));
1591 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
1592 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
1593 req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1594 ep->com.dev->rdev.lldi.ports[0],
 1595 ep->l2t));
1596 sin = (struct sockaddr_in *)&ep->com.local_addr;
1597 req->le.lport = sin->sin_port;
1598 req->le.u.ipv4.lip = sin->sin_addr.s_addr;
1599 sin = (struct sockaddr_in *)&ep->com.remote_addr;
1600 req->le.pport = sin->sin_port;
1601 req->le.u.ipv4.pip = sin->sin_addr.s_addr;
1602 req->tcb.t_state_to_astid =
1603 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
1604 V_FW_OFLD_CONNECTION_WR_ASTID(atid));
1605 req->tcb.cplrxdataack_cplpassacceptrpl =
1606 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
 1607 req->tcb.tx_max = (__force __be32) jiffies;
 1608 req->tcb.rcv_adv = htons(1);
1609 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1610 wscale = compute_wscale(rcv_win);
 1611 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
1612 (nocong ? NO_CONG(1) : 0) |
1613 KEEP_ALIVE(1) |
1614 DELACK(1) |
1615 WND_SCALE(wscale) |
1616 MSS_IDX(mtu_idx) |
1617 L2T_IDX(ep->l2t->idx) |
1618 TX_CHAN(ep->tx_chan) |
1619 SMAC_SEL(ep->smac_idx) |
1620 DSCP(ep->tos) |
1621 ULP_MODE(ULP_MODE_TCPDDP) |
1622 RCV_BUFSIZ(rcv_win >> 10));
1623 req->tcb.opt2 = (__force __be32) (PACE(1) |
1624 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
1625 RX_CHANNEL(0) |
1626 CCTRL_ECN(enable_ecn) |
 1627 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
 1628 if (enable_tcp_timestamps)
 1629 req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
 1630 if (enable_tcp_sack)
 1631 req->tcb.opt2 |= (__force __be32) SACK_EN(1);
 1632 if (wscale && enable_tcp_window_scaling)
1633 req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
1634 req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
1635 req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
1636 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1637 set_bit(ACT_OFLD_CONN, &ep->com.history);
1638 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1639}
1640
1641/*
1642 * Return whether a failed active open has allocated a TID
1643 */
1644static inline int act_open_has_tid(int status)
1645{
1646 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1647 status != CPL_ERR_ARP_MISS;
1648}
1649
1650#define ACT_OPEN_RETRY_COUNT 2
1651
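/*
 * Resolve the L2T entry and per-port transmit/receive resources for the
 * destination. Loopback destinations are mapped to the matching local
 * port; otherwise the real egress device behind any VLAN is used.
 */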
1652static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
1653 struct dst_entry *dst, struct c4iw_dev *cdev,
1654 bool clear_mpa_v1)
1655{
1656 struct neighbour *n;
1657 int err, step;
1658 struct net_device *pdev;
1659
1660 n = dst_neigh_lookup(dst, peer_ip);
1661 if (!n)
1662 return -ENODEV;
1663
1664 rcu_read_lock();
1665 err = -ENOMEM;
1666 if (n->dev->flags & IFF_LOOPBACK) {
1667 if (iptype == 4)
1668 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
1669 else if (IS_ENABLED(CONFIG_IPV6))
1670 for_each_netdev(&init_net, pdev) {
1671 if (ipv6_chk_addr(&init_net,
1672 (struct in6_addr *)peer_ip,
1673 pdev, 1))
1674 break;
1675 }
1676 else
1677 pdev = NULL;
1678
1679 if (!pdev) {
1680 err = -ENODEV;
1681 goto out;
1682 }
1683 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1684 n, pdev, 0);
1685 if (!ep->l2t)
1686 goto out;
1687 ep->mtu = pdev->mtu;
1688 ep->tx_chan = cxgb4_port_chan(pdev);
1689 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1690 step = cdev->rdev.lldi.ntxq /
1691 cdev->rdev.lldi.nchan;
1692 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1693 step = cdev->rdev.lldi.nrxq /
1694 cdev->rdev.lldi.nchan;
1695 ep->ctrlq_idx = cxgb4_port_idx(pdev);
1696 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1697 cxgb4_port_idx(pdev) * step];
1698 dev_put(pdev);
1699 } else {
1700 pdev = get_real_dev(n->dev);
1701 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1702 n, pdev, 0);
1703 if (!ep->l2t)
1704 goto out;
1705 ep->mtu = dst_mtu(dst);
1706 ep->tx_chan = cxgb4_port_chan(n->dev);
1707 ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
1708 step = cdev->rdev.lldi.ntxq /
1709 cdev->rdev.lldi.nchan;
1710 ep->txq_idx = cxgb4_port_idx(n->dev) * step;
1711 ep->ctrlq_idx = cxgb4_port_idx(n->dev);
1712 step = cdev->rdev.lldi.nrxq /
1713 cdev->rdev.lldi.nchan;
1714 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1715 cxgb4_port_idx(n->dev) * step];
1716
1717 if (clear_mpa_v1) {
1718 ep->retry_with_mpa_v1 = 0;
1719 ep->tried_with_mpa_v1 = 0;
1720 }
1721 }
1722 err = 0;
1723out:
1724 rcu_read_unlock();
1725
1726 neigh_release(n);
1727
1728 return err;
1729}
1730
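/*
 * Retry a failed active open: allocate a fresh atid, redo the route and
 * L2T lookup, and resend the connect request. If the retry cannot be set
 * up, the original connect is failed via connect_reply_upcall(-ECONNRESET).
 */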
1731static int c4iw_reconnect(struct c4iw_ep *ep)
1732{
1733 int err = 0;
1734 struct sockaddr_in *laddr = (struct sockaddr_in *)
1735 &ep->com.cm_id->local_addr;
1736 struct sockaddr_in *raddr = (struct sockaddr_in *)
1737 &ep->com.cm_id->remote_addr;
1738 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
1739 &ep->com.cm_id->local_addr;
1740 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
1741 &ep->com.cm_id->remote_addr;
1742 int iptype;
1743 __u8 *ra;
1744
1745 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
1746 init_timer(&ep->timer);
1747
1748 /*
1749 * Allocate an active TID to initiate a TCP connection.
1750 */
1751 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
1752 if (ep->atid == -1) {
1753 pr_err("%s - cannot alloc atid.\n", __func__);
1754 err = -ENOMEM;
1755 goto fail2;
1756 }
1757 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
1758
1759 /* find a route */
1760 if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
1761 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
1762 raddr->sin_addr.s_addr, laddr->sin_port,
1763 raddr->sin_port, 0);
1764 iptype = 4;
1765 ra = (__u8 *)&raddr->sin_addr;
1766 } else {
1767 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
1768 raddr6->sin6_addr.s6_addr,
1769 laddr6->sin6_port, raddr6->sin6_port, 0,
1770 raddr6->sin6_scope_id);
1771 iptype = 6;
1772 ra = (__u8 *)&raddr6->sin6_addr;
1773 }
1774 if (!ep->dst) {
1775 pr_err("%s - cannot find route.\n", __func__);
1776 err = -EHOSTUNREACH;
1777 goto fail3;
1778 }
1779 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
1780 if (err) {
1781 pr_err("%s - cannot alloc l2e.\n", __func__);
1782 goto fail4;
1783 }
1784
1785 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
1786 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
1787 ep->l2t->idx);
1788
1789 state_set(&ep->com, CONNECTING);
1790 ep->tos = 0;
1791
1792 /* send connect request to rnic */
1793 err = send_connect(ep);
1794 if (!err)
1795 goto out;
1796
1797 cxgb4_l2t_release(ep->l2t);
1798fail4:
1799 dst_release(ep->dst);
1800fail3:
1801 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
1802 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
1803fail2:
1804 /*
1805 * Remember to send a notification to the upper layer.  We got
1806 * here on a reconnect attempt the upper layer is not aware of,
1807 * so it is still waiting for the response to the first connect
1808 * request.
1809 */
1810 connect_reply_upcall(ep, -ECONNRESET);
1811 c4iw_put_ep(&ep->com);
1812out:
1813 return err;
1814}
1815
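/*
 * Handle CPL_ACT_OPEN_RPL: the hardware's reply to an active open.
 * Negative advice is ignored, a full TCAM may be retried via an
 * FW_OFLD_CONNECTION_WR, and CPL_ERR_CONN_EXIST triggers a bounded
 * reconnect; anything else fails the connect request upwards.
 */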
1816static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1817{
1818 struct c4iw_ep *ep;
1819 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1820 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1821 ntohl(rpl->atid_status)));
1822 struct tid_info *t = dev->rdev.lldi.tids;
1823 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1824 struct sockaddr_in *la;
1825 struct sockaddr_in *ra;
1826 struct sockaddr_in6 *la6;
1827 struct sockaddr_in6 *ra6;
1828
1829 ep = lookup_atid(t, atid);
1830 la = (struct sockaddr_in *)&ep->com.local_addr;
1831 ra = (struct sockaddr_in *)&ep->com.remote_addr;
1832 la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
1833 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
1834
1835 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1836 status, status2errno(status));
1837
1838 if (status == CPL_ERR_RTX_NEG_ADVICE) {
1839 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1840 atid);
1841 return 0;
1842 }
1843
1844 set_bit(ACT_OPEN_RPL, &ep->com.history);
1845
1846 /*
1847 * Log interesting failures.
1848 */
1849 switch (status) {
1850 case CPL_ERR_CONN_RESET:
1851 case CPL_ERR_CONN_TIMEDOUT:
1852 break;
1853 case CPL_ERR_TCAM_FULL:
1854 mutex_lock(&dev->rdev.stats.lock);
1855 dev->rdev.stats.tcam_full++;
1856 mutex_unlock(&dev->rdev.stats.lock);
1857 if (ep->com.local_addr.ss_family == AF_INET &&
1858 dev->rdev.lldi.enable_fw_ofld_conn) {
1859 send_fw_act_open_req(ep,
1860 GET_TID_TID(GET_AOPEN_ATID(
1861 ntohl(rpl->atid_status))));
1862 return 0;
1863 }
1864 break;
1865 case CPL_ERR_CONN_EXIST:
1866 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
1867 set_bit(ACT_RETRY_INUSE, &ep->com.history);
1868 remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
1869 atid);
1870 cxgb4_free_atid(t, atid);
1871 dst_release(ep->dst);
1872 cxgb4_l2t_release(ep->l2t);
1873 c4iw_reconnect(ep);
1874 return 0;
1875 }
1876 break;
1877 default:
1878 if (ep->com.local_addr.ss_family == AF_INET) {
1879 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
1880 atid, status, status2errno(status),
1881 &la->sin_addr.s_addr, ntohs(la->sin_port),
1882 &ra->sin_addr.s_addr, ntohs(ra->sin_port));
1883 } else {
1884 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
1885 atid, status, status2errno(status),
1886 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
1887 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
1888 }
1889 break;
1890 }
1891
1892 connect_reply_upcall(ep, status2errno(status));
1893 state_set(&ep->com, DEAD);
1894
1895 if (status && act_open_has_tid(status))
1896 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1897
1898 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
1899 cxgb4_free_atid(t, atid);
1900 dst_release(ep->dst);
1901 cxgb4_l2t_release(ep->l2t);
1902 c4iw_put_ep(&ep->com);
1903
1904 return 0;
1905}
1906
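/*
 * Handle CPL_PASS_OPEN_RPL: wake up the thread waiting in
 * c4iw_create_listen() with the server TID creation status.
 */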
1907static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1908{
1909 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1910 struct tid_info *t = dev->rdev.lldi.tids;
1911 unsigned int stid = GET_TID(rpl);
1912 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1913
1914 if (!ep) {
1915 PDBG("%s stid %d lookup failure!\n", __func__, stid);
1916 goto out;
1917 }
1918 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1919 rpl->status, status2errno(rpl->status));
1920 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
1921
1922out:
1923 return 0;
1924}
1925
1926static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1927{
1928 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1929 struct tid_info *t = dev->rdev.lldi.tids;
1930 unsigned int stid = GET_TID(rpl);
1931 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1932
1933 PDBG("%s ep %p\n", __func__, ep);
1934 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
1935 return 0;
1936}
1937
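/*
 * Build and send the CPL_PASS_ACCEPT_RPL that accepts an incoming
 * connection request, programming opt0/opt2 (MTU index, window scale,
 * timestamps, SACK, ECN, congestion control) for the hardware TCB.
 */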
1938static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
1939 struct cpl_pass_accept_req *req)
1940{
1941 struct cpl_pass_accept_rpl *rpl;
1942 unsigned int mtu_idx;
1943 u64 opt0;
1944 u32 opt2;
1945 int wscale;
1946
1947 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1948 BUG_ON(skb_cloned(skb));
1949 skb_trim(skb, sizeof(*rpl));
1950 skb_get(skb);
1951 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1952 wscale = compute_wscale(rcv_win);
1953 opt0 = (nocong ? NO_CONG(1) : 0) |
1954 KEEP_ALIVE(1) |
1955 DELACK(1) |
1956 WND_SCALE(wscale) |
1957 MSS_IDX(mtu_idx) |
1958 L2T_IDX(ep->l2t->idx) |
1959 TX_CHAN(ep->tx_chan) |
1960 SMAC_SEL(ep->smac_idx) |
1961 DSCP(ep->tos >> 2) |
1962 ULP_MODE(ULP_MODE_TCPDDP) |
1963 RCV_BUFSIZ(rcv_win>>10);
1964 opt2 = RX_CHANNEL(0) |
1965 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1966
1967 if (enable_tcp_timestamps && req->tcpopt.tstamp)
1968 opt2 |= TSTAMPS_EN(1);
1969 if (enable_tcp_sack && req->tcpopt.sack)
1970 opt2 |= SACK_EN(1);
1971 if (wscale && enable_tcp_window_scaling)
1972 opt2 |= WND_SCALE_EN(1);
1973 if (enable_ecn) {
1974 const struct tcphdr *tcph;
1975 u32 hlen = ntohl(req->hdr_len);
1976
1977 tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
1978 G_IP_HDR_LEN(hlen);
1979 if (tcph->ece && tcph->cwr)
1980 opt2 |= CCTRL_ECN(1);
1981 }
1982
1983 rpl = cplhdr(skb);
1984 INIT_TP_WR(rpl, ep->hwtid);
1985 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1986 ep->hwtid));
1987 rpl->opt0 = cpu_to_be64(opt0);
1988 rpl->opt2 = cpu_to_be32(opt2);
1989 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
1990 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
1991 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1992
1993 return;
1994}
1995
1996static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
1997{
1998 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
1999 BUG_ON(skb_cloned(skb));
2000 skb_trim(skb, sizeof(struct cpl_tid_release));
2001 skb_get(skb);
2002 release_tid(&dev->rdev, hwtid, skb);
2003 return;
2004}
2005
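/*
 * Extract the 4-tuple (and address family) from the packet headers
 * carried in a CPL_PASS_ACCEPT_REQ.
 */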
2006static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
2007 __u8 *local_ip, __u8 *peer_ip,
2008 __be16 *local_port, __be16 *peer_port)
2009{
2010 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
2011 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
2012 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
2013 struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
2014 struct tcphdr *tcp = (struct tcphdr *)
2015 ((u8 *)(req + 1) + eth_len + ip_len);
2016
2017 if (ip->version == 4) {
2018 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
2019 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
2020 ntohs(tcp->dest));
2021 *iptype = 4;
2022 memcpy(peer_ip, &ip->saddr, 4);
2023 memcpy(local_ip, &ip->daddr, 4);
2024 } else {
2025 PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
2026 ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
2027 ntohs(tcp->dest));
2028 *iptype = 6;
2029 memcpy(peer_ip, ip6->saddr.s6_addr, 16);
2030 memcpy(local_ip, ip6->daddr.s6_addr, 16);
2031 }
2032 *peer_port = tcp->source;
2033 *local_port = tcp->dest;
2034
2035 return;
2036}
2037
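/*
 * Handle CPL_PASS_ACCEPT_REQ: a SYN arrived on one of our server TIDs.
 * Find the listening endpoint, resolve a route back to the peer,
 * allocate and import a child endpoint, then send the accept reply.
 * Any failure releases the hardware TID via reject_cr().
 */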
2038static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2039{
2040 struct c4iw_ep *child_ep = NULL, *parent_ep;
2041 struct cpl_pass_accept_req *req = cplhdr(skb);
2042 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
2043 struct tid_info *t = dev->rdev.lldi.tids;
2044 unsigned int hwtid = GET_TID(req);
2045 struct dst_entry *dst;
2046 __u8 local_ip[16], peer_ip[16];
2047 __be16 local_port, peer_port;
2048 int err;
2049 u16 peer_mss = ntohs(req->tcpopt.mss);
2050 int iptype;
2051
2052 parent_ep = lookup_stid(t, stid);
2053 if (!parent_ep) {
2054 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
2055 goto reject;
2056 }
2057
2058 if (state_read(&parent_ep->com) != LISTEN) {
2059 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
2060 __func__);
2061 goto reject;
2062 }
2063
2064 get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);
2065
2066 /* Find output route */
2067 if (iptype == 4) {
2068 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2069 , __func__, parent_ep, hwtid,
2070 local_ip, peer_ip, ntohs(local_port),
2071 ntohs(peer_port), peer_mss);
2072 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
2073 local_port, peer_port,
2074 GET_POPEN_TOS(ntohl(req->tos_stid)));
2075 } else {
2076 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2077 , __func__, parent_ep, hwtid,
2078 local_ip, peer_ip, ntohs(local_port),
2079 ntohs(peer_port), peer_mss);
2080 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
2081 PASS_OPEN_TOS(ntohl(req->tos_stid)),
2082 ((struct sockaddr_in6 *)
2083 &parent_ep->com.local_addr)->sin6_scope_id);
2084 }
2085 if (!dst) {
2086 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
2087 __func__);
2088 goto reject;
2089 }
2090
2091 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
2092 if (!child_ep) {
2093 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
2094 __func__);
2095 dst_release(dst);
2096 goto reject;
2097 }
2098
2099 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
2100 if (err) {
2101 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
2102 __func__);
2103 dst_release(dst);
2104 kfree(child_ep);
2105 goto reject;
2106 }
2107
2108 if (peer_mss && child_ep->mtu > (peer_mss + 40))
2109 child_ep->mtu = peer_mss + 40;
2110
2111 state_set(&child_ep->com, CONNECTING);
2112 child_ep->com.dev = dev;
2113 child_ep->com.cm_id = NULL;
2114 if (iptype == 4) {
2115 struct sockaddr_in *sin = (struct sockaddr_in *)
2116 &child_ep->com.local_addr;
2117 sin->sin_family = PF_INET;
2118 sin->sin_port = local_port;
2119 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2120 sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
2121 sin->sin_family = PF_INET;
2122 sin->sin_port = peer_port;
2123 sin->sin_addr.s_addr = *(__be32 *)peer_ip;
2124 } else {
2125 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
2126 &child_ep->com.local_addr;
2127 sin6->sin6_family = PF_INET6;
2128 sin6->sin6_port = local_port;
2129 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2130 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
2131 sin6->sin6_family = PF_INET6;
2132 sin6->sin6_port = peer_port;
2133 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2134 }
2135 c4iw_get_ep(&parent_ep->com);
2136 child_ep->parent_ep = parent_ep;
2137 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
2138 child_ep->dst = dst;
2139 child_ep->hwtid = hwtid;
2140
2141 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
2142 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
2143
2144 init_timer(&child_ep->timer);
2145 cxgb4_insert_tid(t, child_ep, hwtid);
2146 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
2147 accept_cr(child_ep, skb, req);
2148 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
2149 goto out;
2150reject:
2151 reject_cr(dev, hwtid, skb);
2152out:
2153 return 0;
2154}
2155
2156static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2157{
2158 struct c4iw_ep *ep;
2159 struct cpl_pass_establish *req = cplhdr(skb);
2160 struct tid_info *t = dev->rdev.lldi.tids;
2161 unsigned int tid = GET_TID(req);
2162
2163 ep = lookup_tid(t, tid);
2164 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2165 ep->snd_seq = be32_to_cpu(req->snd_isn);
2166 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2167
2168 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
2169 ntohs(req->tcp_opt));
2170
2171 set_emss(ep, ntohs(req->tcp_opt));
2172
2173 dst_confirm(ep->dst);
2174 state_set(&ep->com, MPA_REQ_WAIT);
2175 start_ep_timer(ep);
2176 send_flowc(ep, skb);
2177 set_bit(PASS_ESTAB, &ep->com.history);
2178
2179 return 0;
2180}
2181
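/*
 * Handle CPL_PEER_CLOSE: the peer sent a FIN.  Advance the endpoint
 * state machine, move the QP towards CLOSING/IDLE as appropriate and
 * start a half-close of our side unless we are already aborting.
 */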
2182static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2183{
2184 struct cpl_peer_close *hdr = cplhdr(skb);
2185 struct c4iw_ep *ep;
2186 struct c4iw_qp_attributes attrs;
2187 int disconnect = 1;
2188 int release = 0;
2189 struct tid_info *t = dev->rdev.lldi.tids;
2190 unsigned int tid = GET_TID(hdr);
2191 int ret;
2192
2193 ep = lookup_tid(t, tid);
2194 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2195 dst_confirm(ep->dst);
2196
2197 set_bit(PEER_CLOSE, &ep->com.history);
2198 mutex_lock(&ep->com.mutex);
2199 switch (ep->com.state) {
2200 case MPA_REQ_WAIT:
2201 __state_set(&ep->com, CLOSING);
2202 break;
2203 case MPA_REQ_SENT:
2204 __state_set(&ep->com, CLOSING);
2205 connect_reply_upcall(ep, -ECONNRESET);
2206 break;
2207 case MPA_REQ_RCVD:
2208
2209 /*
2210 * We're gonna mark this puppy DEAD, but keep
2211 * the reference on it until the ULP accepts or
2212 * rejects the CR. Also wake up anyone waiting
2213 * in rdma connection migration (see c4iw_accept_cr()).
2214 */
2215 __state_set(&ep->com, CLOSING);
2216 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
2217 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2218 break;
2219 case MPA_REP_SENT:
2220 __state_set(&ep->com, CLOSING);
2221 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
2222 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2223 break;
2224 case FPDU_MODE:
2225 start_ep_timer(ep);
2226 __state_set(&ep->com, CLOSING);
2227 attrs.next_state = C4IW_QP_STATE_CLOSING;
2228 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2229 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2230 if (ret != -ECONNRESET) {
2231 peer_close_upcall(ep);
2232 disconnect = 1;
2233 }
2234 break;
2235 case ABORTING:
2236 disconnect = 0;
2237 break;
2238 case CLOSING:
2239 __state_set(&ep->com, MORIBUND);
2240 disconnect = 0;
2241 break;
2242 case MORIBUND:
2243 stop_ep_timer(ep);
2244 if (ep->com.cm_id && ep->com.qp) {
2245 attrs.next_state = C4IW_QP_STATE_IDLE;
2246 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2247 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2248 }
2249 close_complete_upcall(ep);
2250 __state_set(&ep->com, DEAD);
2251 release = 1;
2252 disconnect = 0;
2253 break;
2254 case DEAD:
2255 disconnect = 0;
2256 break;
2257 default:
2258 BUG_ON(1);
2259 }
2260 mutex_unlock(&ep->com.mutex);
2261 if (disconnect)
2262 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2263 if (release)
2264 release_ep_resources(ep);
2265 return 0;
2266}
2267
2268/*
2269 * Returns whether an ABORT_REQ_RSS message is a negative advice.
2270 */
2271static int is_neg_adv_abort(unsigned int status)
2272{
2273 return status == CPL_ERR_RTX_NEG_ADVICE ||
2274 status == CPL_ERR_PERSIST_NEG_ADVICE;
2275}
2276
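/*
 * Handle CPL_ABORT_REQ_RSS: the peer (or hardware) aborted the
 * connection.  Negative advice is ignored; otherwise wake any waiters,
 * move the QP to ERROR if needed, reply with CPL_ABORT_RPL and either
 * release the endpoint or retry the connection with MPA v1.
 */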
2277static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2278{
2279 struct cpl_abort_req_rss *req = cplhdr(skb);
2280 struct c4iw_ep *ep;
2281 struct cpl_abort_rpl *rpl;
2282 struct sk_buff *rpl_skb;
2283 struct c4iw_qp_attributes attrs;
2284 int ret;
2285 int release = 0;
2286 struct tid_info *t = dev->rdev.lldi.tids;
2287 unsigned int tid = GET_TID(req);
2288
2289 ep = lookup_tid(t, tid);
2290 if (is_neg_adv_abort(req->status)) {
2291 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
2292 ep->hwtid);
2293 return 0;
2294 }
2295 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2296 ep->com.state);
2297 set_bit(PEER_ABORT, &ep->com.history);
2298
2299 /*
2300 * Wake up any threads in rdma_init() or rdma_fini().
2301 * However, this is not needed if com state is just
2302 * MPA_REQ_SENT
2303 */
2304 if (ep->com.state != MPA_REQ_SENT)
2305 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2306
2307 mutex_lock(&ep->com.mutex);
2308 switch (ep->com.state) {
2309 case CONNECTING:
2310 break;
2311 case MPA_REQ_WAIT:
2312 stop_ep_timer(ep);
2313 break;
2314 case MPA_REQ_SENT:
2315 stop_ep_timer(ep);
2316 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
2317 connect_reply_upcall(ep, -ECONNRESET);
2318 else {
2319 /*
2320 * we just don't send notification upwards because we
2321 * want to retry with mpa_v1 without upper layers even
2322 * knowing it.
2323 *
2324 * do some housekeeping so as to re-initiate the
2325 * connection
2326 */
2327 PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
2328 mpa_rev);
2329 ep->retry_with_mpa_v1 = 1;
2330 }
2331 break;
2332 case MPA_REP_SENT:
2333 break;
2334 case MPA_REQ_RCVD:
2335 break;
2336 case MORIBUND:
2337 case CLOSING:
2338 stop_ep_timer(ep);
2339 /*FALLTHROUGH*/
2340 case FPDU_MODE:
2341 if (ep->com.cm_id && ep->com.qp) {
2342 attrs.next_state = C4IW_QP_STATE_ERROR;
2343 ret = c4iw_modify_qp(ep->com.qp->rhp,
2344 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2345 &attrs, 1);
2346 if (ret)
2347 printk(KERN_ERR MOD
2348 "%s - qp <- error failed!\n",
2349 __func__);
2350 }
2351 peer_abort_upcall(ep);
2352 break;
2353 case ABORTING:
2354 break;
2355 case DEAD:
2356 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
2357 mutex_unlock(&ep->com.mutex);
2358 return 0;
2359 default:
2360 BUG_ON(1);
2361 break;
2362 }
2363 dst_confirm(ep->dst);
2364 if (ep->com.state != ABORTING) {
2365 __state_set(&ep->com, DEAD);
2366 /* we don't release if we want to retry with mpa_v1 */
2367 if (!ep->retry_with_mpa_v1)
2368 release = 1;
2369 }
2370 mutex_unlock(&ep->com.mutex);
2371
2372 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
2373 if (!rpl_skb) {
2374 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
2375 __func__);
2376 release = 1;
2377 goto out;
2378 }
2379 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
2380 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
2381 INIT_TP_WR(rpl, ep->hwtid);
2382 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
2383 rpl->cmd = CPL_ABORT_NO_RST;
2384 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2385out:
2386 if (release)
2387 release_ep_resources(ep);
2388 else if (ep->retry_with_mpa_v1) {
2389 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
2390 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
2391 dst_release(ep->dst);
2392 cxgb4_l2t_release(ep->l2t);
2393 c4iw_reconnect(ep);
2394 }
2395
2396 return 0;
2397}
2398
2399static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2400{
2401 struct c4iw_ep *ep;
2402 struct c4iw_qp_attributes attrs;
2403 struct cpl_close_con_rpl *rpl = cplhdr(skb);
2404 int release = 0;
2405 struct tid_info *t = dev->rdev.lldi.tids;
2406 unsigned int tid = GET_TID(rpl);
2407
2408 ep = lookup_tid(t, tid);
2409
2410 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2411 BUG_ON(!ep);
2412
2413 /* The cm_id may be null if we failed to connect */
2414 mutex_lock(&ep->com.mutex);
2415 switch (ep->com.state) {
2416 case CLOSING:
2417 __state_set(&ep->com, MORIBUND);
2418 break;
2419 case MORIBUND:
2420 stop_ep_timer(ep);
2421 if ((ep->com.cm_id) && (ep->com.qp)) {
2422 attrs.next_state = C4IW_QP_STATE_IDLE;
2423 c4iw_modify_qp(ep->com.qp->rhp,
2424 ep->com.qp,
2425 C4IW_QP_ATTR_NEXT_STATE,
2426 &attrs, 1);
2427 }
2428 close_complete_upcall(ep);
2429 __state_set(&ep->com, DEAD);
2430 release = 1;
2431 break;
2432 case ABORTING:
2433 case DEAD:
2434 break;
2435 default:
2436 BUG_ON(1);
2437 break;
2438 }
2439 mutex_unlock(&ep->com.mutex);
2440 if (release)
2441 release_ep_resources(ep);
2442 return 0;
2443}
2444
2445static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
2446{
2447 struct cpl_rdma_terminate *rpl = cplhdr(skb);
2448 struct tid_info *t = dev->rdev.lldi.tids;
2449 unsigned int tid = GET_TID(rpl);
2450 struct c4iw_ep *ep;
2451 struct c4iw_qp_attributes attrs;
2452
2453 ep = lookup_tid(t, tid);
2454 BUG_ON(!ep);
2455
2456 if (ep && ep->com.qp) {
2457 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2458 ep->com.qp->wq.sq.qid);
2459 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2460 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2461 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2462 } else
2463 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
2464
2465 return 0;
2466}
2467
2468/*
2469 * Upcall from the adapter indicating data has been transmitted.
2470 * For us it's just the single MPA request or reply. We can now free
2471 * the skb holding the mpa message.
2472 */
2473static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2474{
2475 struct c4iw_ep *ep;
2476 struct cpl_fw4_ack *hdr = cplhdr(skb);
2477 u8 credits = hdr->credits;
2478 unsigned int tid = GET_TID(hdr);
2479 struct tid_info *t = dev->rdev.lldi.tids;
2480
2481
2482 ep = lookup_tid(t, tid);
2483 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
2484 if (credits == 0) {
2485 PDBG("%s 0 credit ack ep %p tid %u state %u\n",
2486 __func__, ep, ep->hwtid, state_read(&ep->com));
2487 return 0;
2488 }
2489
2490 dst_confirm(ep->dst);
2491 if (ep->mpa_skb) {
2492 PDBG("%s last streaming msg ack ep %p tid %u state %u "
2493 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
2494 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
2495 kfree_skb(ep->mpa_skb);
2496 ep->mpa_skb = NULL;
2497 }
2498 return 0;
2499}
2500
2501int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2502{
2503 int err;
2504 struct c4iw_ep *ep = to_ep(cm_id);
2505 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2506
2507 if (state_read(&ep->com) == DEAD) {
2508 c4iw_put_ep(&ep->com);
2509 return -ECONNRESET;
2510 }
2511 set_bit(ULP_REJECT, &ep->com.history);
2512 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
2513 if (mpa_rev == 0)
2514 abort_connection(ep, NULL, GFP_KERNEL);
2515 else {
2516 err = send_mpa_reject(ep, pdata, pdata_len);
2517 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2518 }
2519 c4iw_put_ep(&ep->com);
2520 return 0;
2521}
2522
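/*
 * iw_cm accept handler: validate the requested IRD/ORD against the
 * MPA negotiation, bind the QP to the endpoint, move it to RTS and
 * send the MPA reply that completes connection establishment.
 */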
2523int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2524{
2525 int err;
2526 struct c4iw_qp_attributes attrs;
2527 enum c4iw_qp_attr_mask mask;
2528 struct c4iw_ep *ep = to_ep(cm_id);
2529 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2530 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2531
2532 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2533 if (state_read(&ep->com) == DEAD) {
2534 err = -ECONNRESET;
2535 goto err;
2536 }
2537
2538 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
2539 BUG_ON(!qp);
2540
2541 set_bit(ULP_ACCEPT, &ep->com.history);
2542 if ((conn_param->ord > c4iw_max_read_depth) ||
2543 (conn_param->ird > c4iw_max_read_depth)) {
2544 abort_connection(ep, NULL, GFP_KERNEL);
2545 err = -EINVAL;
2546 goto err;
2547 }
2548
2549 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2550 if (conn_param->ord > ep->ird) {
2551 ep->ird = conn_param->ird;
2552 ep->ord = conn_param->ord;
2553 send_mpa_reject(ep, conn_param->private_data,
2554 conn_param->private_data_len);
2555 abort_connection(ep, NULL, GFP_KERNEL);
2556 err = -ENOMEM;
2557 goto err;
2558 }
2559 if (conn_param->ird > ep->ord) {
2560 if (!ep->ord)
2561 conn_param->ird = 1;
2562 else {
2563 abort_connection(ep, NULL, GFP_KERNEL);
2564 err = -ENOMEM;
2565 goto err;
2566 }
2567 }
2568
2569 }
2570 ep->ird = conn_param->ird;
2571 ep->ord = conn_param->ord;
2572
2573 if (ep->mpa_attr.version != 2)
2574 if (peer2peer && ep->ird == 0)
2575 ep->ird = 1;
2576
2577 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
2578
2579 cm_id->add_ref(cm_id);
2580 ep->com.cm_id = cm_id;
2581 ep->com.qp = qp;
2582 ref_qp(ep);
2583
2584 /* bind QP to EP and move to RTS */
2585 attrs.mpa_attr = ep->mpa_attr;
2586 attrs.max_ird = ep->ird;
2587 attrs.max_ord = ep->ord;
2588 attrs.llp_stream_handle = ep;
2589 attrs.next_state = C4IW_QP_STATE_RTS;
2590
2591 /* bind QP and TID with INIT_WR */
2592 mask = C4IW_QP_ATTR_NEXT_STATE |
2593 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2594 C4IW_QP_ATTR_MPA_ATTR |
2595 C4IW_QP_ATTR_MAX_IRD |
2596 C4IW_QP_ATTR_MAX_ORD;
2597
2598 err = c4iw_modify_qp(ep->com.qp->rhp,
2599 ep->com.qp, mask, &attrs, 1);
2600 if (err)
2601 goto err1;
2602 err = send_mpa_reply(ep, conn_param->private_data,
2603 conn_param->private_data_len);
2604 if (err)
2605 goto err1;
2606
2607 state_set(&ep->com, FPDU_MODE);
2608 established_upcall(ep);
2609 c4iw_put_ep(&ep->com);
2610 return 0;
2611err1:
2612 ep->com.cm_id = NULL;
2613 cm_id->rem_ref(cm_id);
2614err:
2615 c4iw_put_ep(&ep->com);
2616 return err;
2617}
2618
2619static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
2620{
2621 struct in_device *ind;
2622 int found = 0;
2623 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
2624 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
2625
2626 ind = in_dev_get(dev->rdev.lldi.ports[0]);
2627 if (!ind)
2628 return -EADDRNOTAVAIL;
2629 for_primary_ifa(ind) {
2630 laddr->sin_addr.s_addr = ifa->ifa_address;
2631 raddr->sin_addr.s_addr = ifa->ifa_address;
2632 found = 1;
2633 break;
2634 }
2635 endfor_ifa(ind);
2636 in_dev_put(ind);
2637 return found ? 0 : -EADDRNOTAVAIL;
2638}
2639
2640static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
2641 unsigned char banned_flags)
2642{
2643 struct inet6_dev *idev;
2644 int err = -EADDRNOTAVAIL;
2645
2646 rcu_read_lock();
2647 idev = __in6_dev_get(dev);
2648 if (idev != NULL) {
2649 struct inet6_ifaddr *ifp;
2650
2651 read_lock_bh(&idev->lock);
2652 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2653 if (ifp->scope == IFA_LINK &&
2654 !(ifp->flags & banned_flags)) {
2655 memcpy(addr, &ifp->addr, 16);
2656 err = 0;
2657 break;
2658 }
2659 }
2660 read_unlock_bh(&idev->lock);
2661 }
2662 rcu_read_unlock();
2663 return err;
2664}
2665
2666static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
2667{
2668 struct in6_addr uninitialized_var(addr);
2669 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
2670 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
2671
2672 if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
2673 memcpy(la6->sin6_addr.s6_addr, &addr, 16);
2674 memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
2675 return 0;
2676 }
2677 return -EADDRNOTAVAIL;
2678}
2679
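/*
 * iw_cm connect handler: allocate an endpoint and an active TID,
 * resolve a route to the peer (IPv4 or IPv6, including loopback
 * requests to the ANY address), import the L2T/queue resources and
 * send the connect request to the adapter.
 */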
2680int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2681{
2682 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2683 struct c4iw_ep *ep;
2684 int err = 0;
2685 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
2686 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
2687 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
2688 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
2689 &cm_id->remote_addr;
2690 __u8 *ra;
2691 int iptype;
2692
2693 if ((conn_param->ord > c4iw_max_read_depth) ||
2694 (conn_param->ird > c4iw_max_read_depth)) {
2695 err = -EINVAL;
2696 goto out;
2697 }
2698 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2699 if (!ep) {
2700 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2701 err = -ENOMEM;
2702 goto out;
2703 }
2704 init_timer(&ep->timer);
2705 ep->plen = conn_param->private_data_len;
2706 if (ep->plen)
2707 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2708 conn_param->private_data, ep->plen);
2709 ep->ird = conn_param->ird;
2710 ep->ord = conn_param->ord;
2711
2712 if (peer2peer && ep->ord == 0)
2713 ep->ord = 1;
2714
2715 cm_id->add_ref(cm_id);
2716 ep->com.dev = dev;
2717 ep->com.cm_id = cm_id;
2718 ep->com.qp = get_qhp(dev, conn_param->qpn);
2719 if (!ep->com.qp) {
2720 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
2721 err = -EINVAL;
2722 goto fail2;
2723 }
2724 ref_qp(ep);
2725 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
2726 ep->com.qp, cm_id);
2727
2728 /*
2729 * Allocate an active TID to initiate a TCP connection.
2730 */
2731 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
2732 if (ep->atid == -1) {
2733 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
2734 err = -ENOMEM;
2735 goto fail2;
2736 }
2737 insert_handle(dev, &dev->atid_idr, ep, ep->atid);
2738
2739 if (cm_id->remote_addr.ss_family == AF_INET) {
2740 iptype = 4;
2741 ra = (__u8 *)&raddr->sin_addr;
2742
2743 /*
2744 * Handle loopback requests to INADDR_ANY.
2745 */
2746 if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
2747 err = pick_local_ipaddrs(dev, cm_id);
2748 if (err)
2749 goto fail2;
2750 }
2751
2752 /* find a route */
2753 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
2754 __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
2755 ra, ntohs(raddr->sin_port));
2756 ep->dst = find_route(dev, laddr->sin_addr.s_addr,
2757 raddr->sin_addr.s_addr, laddr->sin_port,
2758 raddr->sin_port, 0);
2759 } else {
2760 iptype = 6;
2761 ra = (__u8 *)&raddr6->sin6_addr;
2762
2763 /*
2764 * Handle loopback requests to INADDR_ANY.
2765 */
2766 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
2767 err = pick_local_ip6addrs(dev, cm_id);
2768 if (err)
2769 goto fail2;
2770 }
2771
2772 /* find a route */
2773 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
2774 __func__, laddr6->sin6_addr.s6_addr,
2775 ntohs(laddr6->sin6_port),
2776 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
2777 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
2778 raddr6->sin6_addr.s6_addr,
2779 laddr6->sin6_port, raddr6->sin6_port, 0,
2780 raddr6->sin6_scope_id);
2781 }
2782 if (!ep->dst) {
2783 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2784 err = -EHOSTUNREACH;
2785 goto fail3;
2786 }
2787
2788 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
2789 if (err) {
2790 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
2791 goto fail4;
2792 }
2793
2794 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2795 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2796 ep->l2t->idx);
2797
2798 state_set(&ep->com, CONNECTING);
2799 ep->tos = 0;
2800 memcpy(&ep->com.local_addr, &cm_id->local_addr,
2801 sizeof(ep->com.local_addr));
2802 memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
2803 sizeof(ep->com.remote_addr));
2804
2805 /* send connect request to rnic */
2806 err = send_connect(ep);
2807 if (!err)
2808 goto out;
2809
2810 cxgb4_l2t_release(ep->l2t);
2811fail4:
2812 dst_release(ep->dst);
2813fail3:
2814 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
2815 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2816fail2:
2817 cm_id->rem_ref(cm_id);
2818 c4iw_put_ep(&ep->com);
2819out:
2820 return err;
2821}
2822
2823static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
2824{
2825 int err;
2826 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
2827
2828 c4iw_init_wr_wait(&ep->com.wr_wait);
2829 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
2830 ep->stid, &sin6->sin6_addr,
2831 sin6->sin6_port,
2832 ep->com.dev->rdev.lldi.rxq_ids[0]);
2833 if (!err)
2834 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
2835 &ep->com.wr_wait,
2836 0, 0, __func__);
2837 if (err)
2838 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
2839 err, ep->stid,
2840 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
2841 return err;
2842}
2843
2844static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
2845{
2846 int err;
2847 struct sockaddr_in *sin = (struct sockaddr_in *)&ep->com.local_addr;
2848
2849 if (dev->rdev.lldi.enable_fw_ofld_conn) {
2850 do {
2851 err = cxgb4_create_server_filter(
2852 ep->com.dev->rdev.lldi.ports[0], ep->stid,
2853 sin->sin_addr.s_addr, sin->sin_port, 0,
2854 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
2855 if (err == -EBUSY) {
2856 set_current_state(TASK_UNINTERRUPTIBLE);
2857 schedule_timeout(usecs_to_jiffies(100));
2858 }
2859 } while (err == -EBUSY);
2860 } else {
2861 c4iw_init_wr_wait(&ep->com.wr_wait);
2862 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
2863 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
2864 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
2865 if (!err)
2866 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
2867 &ep->com.wr_wait,
2868 0, 0, __func__);
2869 }
2870 if (err)
2871 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
2872 , err, ep->stid,
2873 &sin->sin_addr, ntohs(sin->sin_port));
2874 return err;
2875}
2876
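/*
 * iw_cm listen handler: allocate a listening endpoint and a server
 * TID (or a server filter TID when fw_ofld_conn is enabled for IPv4)
 * and create the hardware server entry.
 */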
2877int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2878{
2879 int err = 0;
2880 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2881 struct c4iw_listen_ep *ep;
2882
2883 might_sleep();
2884
2885 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2886 if (!ep) {
2887 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2888 err = -ENOMEM;
2889 goto fail1;
2890 }
2891 PDBG("%s ep %p\n", __func__, ep);
2892 cm_id->add_ref(cm_id);
2893 ep->com.cm_id = cm_id;
2894 ep->com.dev = dev;
2895 ep->backlog = backlog;
2896 memcpy(&ep->com.local_addr, &cm_id->local_addr,
2897 sizeof(ep->com.local_addr));
2898
2899 /*
2900 * Allocate a server TID.
2901 */
2902 if (dev->rdev.lldi.enable_fw_ofld_conn &&
2903 ep->com.local_addr.ss_family == AF_INET)
2904 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
2905 cm_id->local_addr.ss_family, ep);
2906 else
2907 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
2908 cm_id->local_addr.ss_family, ep);
2909
2910 if (ep->stid == -1) {
2911 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
2912 err = -ENOMEM;
2913 goto fail2;
2914 }
2915 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
2916 state_set(&ep->com, LISTEN);
2917 if (ep->com.local_addr.ss_family == AF_INET)
2918 err = create_server4(dev, ep);
2919 else
2920 err = create_server6(dev, ep);
2921 if (!err) {
2922 cm_id->provider_data = ep;
2923 goto out;
2924 }
2925 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
2926 ep->com.local_addr.ss_family);
2927fail2:
2928 cm_id->rem_ref(cm_id);
2929 c4iw_put_ep(&ep->com);
2930fail1:
2931out:
2932 return err;
2933}
2934
2935int c4iw_destroy_listen(struct iw_cm_id *cm_id)
2936{
2937 int err;
2938 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2939
2940 PDBG("%s ep %p\n", __func__, ep);
2941
2942 might_sleep();
2943 state_set(&ep->com, DEAD);
2944 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
2945 ep->com.local_addr.ss_family == AF_INET) {
2946 err = cxgb4_remove_server_filter(
2947 ep->com.dev->rdev.lldi.ports[0], ep->stid,
2948 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
2949 } else {
2950 c4iw_init_wr_wait(&ep->com.wr_wait);
2951 err = cxgb4_remove_server(
2952 ep->com.dev->rdev.lldi.ports[0], ep->stid,
2953 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
2954 if (err)
2955 goto done;
2956 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
2957 0, 0, __func__);
2958 }
2959 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
2960 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
2961 ep->com.local_addr.ss_family);
2962done:
2963 cm_id->rem_ref(cm_id);
2964 c4iw_put_ep(&ep->com);
2965 return err;
2966}
2967
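/*
 * Start an orderly close or an abort of the connection, depending on
 * 'abrupt' and the current endpoint state.  Returns non-zero if the
 * close/abort send fails, in which case the error is treated as fatal
 * and the endpoint resources are released.
 */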
2968int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2969{
2970 int ret = 0;
2971 int close = 0;
2972 int fatal = 0;
2973 struct c4iw_rdev *rdev;
2974
2975 mutex_lock(&ep->com.mutex);
2976
2977 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2978 states[ep->com.state], abrupt);
2979
2980 rdev = &ep->com.dev->rdev;
2981 if (c4iw_fatal_error(rdev)) {
2982 fatal = 1;
2983 close_complete_upcall(ep);
2984 ep->com.state = DEAD;
2985 }
2986 switch (ep->com.state) {
2987 case MPA_REQ_WAIT:
2988 case MPA_REQ_SENT:
2989 case MPA_REQ_RCVD:
2990 case MPA_REP_SENT:
2991 case FPDU_MODE:
2992 close = 1;
2993 if (abrupt)
2994 ep->com.state = ABORTING;
2995 else {
2996 ep->com.state = CLOSING;
2997 start_ep_timer(ep);
2998 }
2999 set_bit(CLOSE_SENT, &ep->com.flags);
3000 break;
3001 case CLOSING:
3002 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3003 close = 1;
3004 if (abrupt) {
3005 stop_ep_timer(ep);
3006 ep->com.state = ABORTING;
3007 } else
3008 ep->com.state = MORIBUND;
3009 }
3010 break;
3011 case MORIBUND:
3012 case ABORTING:
3013 case DEAD:
3014 PDBG("%s ignoring disconnect ep %p state %u\n",
3015 __func__, ep, ep->com.state);
3016 break;
3017 default:
3018 BUG();
3019 break;
3020 }
3021
3022 if (close) {
3023 if (abrupt) {
3024 set_bit(EP_DISC_ABORT, &ep->com.history);
3025 close_complete_upcall(ep);
3026 ret = send_abort(ep, NULL, gfp);
3027 } else {
3028 set_bit(EP_DISC_CLOSE, &ep->com.history);
3029 ret = send_halfclose(ep, gfp);
3030 }
3031 if (ret)
3032 fatal = 1;
3033 }
3034 mutex_unlock(&ep->com.mutex);
3035 if (fatal)
3036 release_ep_resources(ep);
3037 return ret;
3038}
3039
3040static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3041 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3042{
3043 struct c4iw_ep *ep;
3044 int atid = be32_to_cpu(req->tid);
3045
3046 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3047 (__force u32) req->tid);
3048 if (!ep)
3049 return;
3050
3051 switch (req->retval) {
3052 case FW_ENOMEM:
3053 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3054 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3055 send_fw_act_open_req(ep, atid);
3056 return;
3057 }
3058 case FW_EADDRINUSE:
3059 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3060 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3061 send_fw_act_open_req(ep, atid);
3062 return;
3063 }
3064 break;
3065 default:
3066 pr_info("%s unexpected ofld conn wr retval %d\n",
3067 __func__, req->retval);
3068 break;
3069 }
3070 pr_err("active ofld_connect_wr failure %d atid %d\n",
3071 req->retval, atid);
3072 mutex_lock(&dev->rdev.stats.lock);
3073 dev->rdev.stats.act_ofld_conn_fails++;
3074 mutex_unlock(&dev->rdev.stats.lock);
3075 connect_reply_upcall(ep, status2errno(req->retval));
3076 state_set(&ep->com, DEAD);
3077 remove_handle(dev, &dev->atid_idr, atid);
3078 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3079 dst_release(ep->dst);
3080 cxgb4_l2t_release(ep->l2t);
3081 c4iw_put_ep(&ep->com);
3082}
3083
3084static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3085 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3086{
3087 struct sk_buff *rpl_skb;
3088 struct cpl_pass_accept_req *cpl;
3089 int ret;
3090
3091 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
3092 BUG_ON(!rpl_skb);
3093 if (req->retval) {
3094 PDBG("%s passive open failure %d\n", __func__, req->retval);
3095 mutex_lock(&dev->rdev.stats.lock);
3096 dev->rdev.stats.pas_ofld_conn_fails++;
3097 mutex_unlock(&dev->rdev.stats.lock);
3098 kfree_skb(rpl_skb);
3099 } else {
3100 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3101 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
3102 (__force u32) htonl(
3103 (__force u32) req->tid)));
3104 ret = pass_accept_req(dev, rpl_skb);
3105 if (!ret)
3106 kfree_skb(rpl_skb);
3107 }
3108 return;
3109}
3110
3111static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3112{
3113 struct cpl_fw6_msg *rpl = cplhdr(skb);
3114 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3115
3116 switch (rpl->type) {
3117 case FW6_TYPE_CQE:
3118 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3119 break;
3120 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3121 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3122 switch (req->t_state) {
3123 case TCP_SYN_SENT:
3124 active_ofld_conn_reply(dev, skb, req);
3125 break;
3126 case TCP_SYN_RECV:
3127 passive_ofld_conn_reply(dev, skb, req);
3128 break;
3129 default:
3130 pr_err("%s unexpected ofld conn wr state %d\n",
3131 __func__, req->t_state);
3132 break;
3133 }
3134 break;
3135 }
3136 return 0;
3137}
3138
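/*
 * Convert a CPL_RX_PKT carrying a SYN (delivered via a server filter)
 * into a synthesized CPL_PASS_ACCEPT_REQ in place, parsing the TCP
 * options from the SYN to fill in the tcpopt fields.
 */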
3139static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
3140{
3141 u32 l2info;
3142 u16 vlantag, len, hdr_len, eth_hdr_len;
3143 u8 intf;
3144 struct cpl_rx_pkt *cpl = cplhdr(skb);
3145 struct cpl_pass_accept_req *req;
3146 struct tcp_options_received tmp_opt;
3147 struct c4iw_dev *dev;
3148
3149 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3150 /* Store values from cpl_rx_pkt in temporary location. */
3151 vlantag = (__force u16) cpl->vlan;
3152 len = (__force u16) cpl->len;
3153 l2info = (__force u32) cpl->l2info;
3154 hdr_len = (__force u16) cpl->hdr_len;
3155 intf = cpl->iff;
3156
3157 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3158
3159 /*
3160 * We need to parse the TCP options from the SYN packet
3161 * in order to generate the cpl_pass_accept_req.
3162 */
3163 memset(&tmp_opt, 0, sizeof(tmp_opt));
3164 tcp_clear_options(&tmp_opt);
3165 tcp_parse_options(skb, &tmp_opt, 0, NULL);
3166
3167 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
3168 memset(req, 0, sizeof(*req));
3169 req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
3170 V_SYN_MAC_IDX(G_RX_MACIDX(
3171 (__force int) htonl(l2info))) |
3172 F_SYN_XACT_MATCH);
3173 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
3174 G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
3175 G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
3176 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
3177 (__force int) htonl(l2info))) |
3178 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
3179 (__force int) htons(hdr_len))) |
3180 V_IP_HDR_LEN(G_RX_IPHDR_LEN(
3181 (__force int) htons(hdr_len))) |
3182 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
3183 req->vlan = (__force __be16) vlantag;
3184 req->len = (__force __be16) len;
3185 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
3186 PASS_OPEN_TOS(tos));
3187 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3188 if (tmp_opt.wscale_ok)
3189 req->tcpopt.wsf = tmp_opt.snd_wscale;
3190 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3191 if (tmp_opt.sack_ok)
3192 req->tcpopt.sack = 1;
3193 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3194 return;
3195}
3196
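/*
 * Send an FW_OFLD_CONNECTION_WR asking the firmware to complete the
 * passive open for a filter-redirected SYN.  The original skb is
 * stashed in the cookie so the reply handler can resubmit it as a
 * cpl_pass_accept_req once the TID is known.
 */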
3197static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3198 __be32 laddr, __be16 lport,
3199 __be32 raddr, __be16 rport,
3200 u32 rcv_isn, u32 filter, u16 window,
3201 u32 rss_qid, u8 port_id)
3202{
3203 struct sk_buff *req_skb;
3204 struct fw_ofld_connection_wr *req;
3205 struct cpl_pass_accept_req *cpl = cplhdr(skb);
3206
3207 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
3208 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
3209 memset(req, 0, sizeof(*req));
3210 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
3211 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
3212 req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
3213 req->le.filter = (__force __be32) filter;
3214 req->le.lport = lport;
3215 req->le.pport = rport;
3216 req->le.u.ipv4.lip = laddr;
3217 req->le.u.ipv4.pip = raddr;
3218 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
3219 req->tcb.rcv_adv = htons(window);
3220 req->tcb.t_state_to_astid =
3221 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
3222 V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
3223 V_FW_OFLD_CONNECTION_WR_ASTID(
3224 GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
3225
3226 /*
3227 * We store the qid in opt2 which will be used by the firmware
3228 * to send us the wr response.
3229 */
3230 req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
3231
3232 /*
3233 * We initialize the MSS index in the TCB to 0xF so that when the
3234 * driver sends the cpl_pass_accept_rpl, the TCB picks up the
3235 * correct value.  If it were 0, TP would ignore any value > 0 for
3236 * the MSS index.
3237 */
3238 req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
3239 req->cookie = (unsigned long)skb;
3240
3241 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
3242 cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
3243}
3244
3245/*
3246 * Handler for CPL_RX_PKT messages.  These arrive when a filter,
3247 * rather than a server TID, is being used to redirect a SYN packet.
3248 * Packets that hit the filter are redirected to the offload queue
3249 * and the driver tries to establish the connection using a firmware
3250 * work request.
3251 */
3252static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3253{
3254 int stid;
3255 unsigned int filter;
3256 struct ethhdr *eh = NULL;
3257 struct vlan_ethhdr *vlan_eh = NULL;
3258 struct iphdr *iph;
3259 struct tcphdr *tcph;
3260 struct rss_header *rss = (void *)skb->data;
3261 struct cpl_rx_pkt *cpl = (void *)skb->data;
3262 struct cpl_pass_accept_req *req = (void *)(rss + 1);
3263 struct l2t_entry *e;
3264 struct dst_entry *dst;
3265 struct c4iw_ep *lep;
3266 u16 window;
3267 struct port_info *pi;
3268 struct net_device *pdev;
3269 u16 rss_qid, eth_hdr_len;
3270 int step;
3271 u32 tx_chan;
3272 struct neighbour *neigh;
3273
3274 /* Drop all non-SYN packets */
3275 if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
3276 goto reject;
3277
3278 /*
3279 * Drop all packets which did not hit the filter.
3280 * Unlikely to happen.
3281 */
3282 if (!(rss->filter_hit && rss->filter_tid))
3283 goto reject;
3284
3285 /*
3286 * Calculate the server TID from the filter hit index in cpl_rx_pkt.
3287 */
3288 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
3289
3290 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
3291 if (!lep) {
3292 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
3293 goto reject;
3294 }
3295
3296 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
3297 G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
3298 G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
3299 if (eth_hdr_len == ETH_HLEN) {
1cab775c
VP
3300 eh = (struct ethhdr *)(req + 1);
3301 iph = (struct iphdr *)(eh + 1);
3302 } else {
3303 vlan_eh = (struct vlan_ethhdr *)(req + 1);
3304 iph = (struct iphdr *)(vlan_eh + 1);
3305 skb->vlan_tci = ntohs(cpl->vlan);
3306 }
3307
3308 if (iph->version != 0x4)
3309 goto reject;
3310
3311 tcph = (struct tcphdr *)(iph + 1);
3312 skb_set_network_header(skb, (void *)iph - (void *)rss);
3313 skb_set_transport_header(skb, (void *)tcph - (void *)rss);
3314 skb_get(skb);
3315
3316 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
3317 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
3318 ntohs(tcph->source), iph->tos);
3319
3320 dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
3321 iph->tos);
3322 if (!dst) {
1cab775c
VP
3323 pr_err("%s - failed to find dst entry!\n",
3324 __func__);
3325 goto reject;
3326 }
3327 neigh = dst_neigh_lookup_skb(dst, skb);
3328
3329 if (!neigh) {
3330 pr_err("%s - failed to allocate neigh!\n",
3331 __func__);
3332 goto free_dst;
3333 }
3334
3335 if (neigh->dev->flags & IFF_LOOPBACK) {
3336 pdev = ip_dev_find(&init_net, iph->daddr);
3337 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3338 pdev, 0);
3339 pi = (struct port_info *)netdev_priv(pdev);
3340 tx_chan = cxgb4_port_chan(pdev);
3341 dev_put(pdev);
3342 } else {
3343 pdev = get_real_dev(neigh->dev);
3344 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3345 pdev, 0);
3346 pi = (struct port_info *)netdev_priv(pdev);
3347 tx_chan = cxgb4_port_chan(pdev);
3348 }
3349 if (!e) {
3350 pr_err("%s - failed to allocate l2t entry!\n",
3351 __func__);
3352 goto free_dst;
3353 }
3354
3355 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
3356 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
3357 window = (__force u16) htons((__force u16)tcph->window);
3358
3359 /* Calculate the filter portion for the LE region. */
3360 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3361 dev->rdev.lldi.ports[0],
3362 e));
3363
3364 /*
3365 * Synthesize the cpl_pass_accept_req. We have everything except the
3366 * TID. Once firmware sends a reply with TID we update the TID field
3367 * in cpl and pass it through the regular cpl_pass_accept_req path.
3368 */
3369 build_cpl_pass_accept_req(skb, stid, iph->tos);
3370 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
3371 tcph->source, ntohl(tcph->seq), filter, window,
3372 rss_qid, pi->port_id);
3373 cxgb4_l2t_release(e);
3374free_dst:
3375 dst_release(dst);
3376reject:
3377 return 0;
3378}
3379
3380/*
3381 * These are the real handlers that are called from a
3382 * work queue.
3383 */
3384static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
3385 [CPL_ACT_ESTABLISH] = act_establish,
3386 [CPL_ACT_OPEN_RPL] = act_open_rpl,
3387 [CPL_RX_DATA] = rx_data,
3388 [CPL_ABORT_RPL_RSS] = abort_rpl,
3389 [CPL_ABORT_RPL] = abort_rpl,
3390 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
3391 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
3392 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
3393 [CPL_PASS_ESTABLISH] = pass_establish,
3394 [CPL_PEER_CLOSE] = peer_close,
3395 [CPL_ABORT_REQ_RSS] = peer_abort,
3396 [CPL_CLOSE_CON_RPL] = close_con_rpl,
3397 [CPL_RDMA_TERMINATE] = terminate,
3398 [CPL_FW4_ACK] = fw4_ack,
3399 [CPL_FW6_MSG] = deferred_fw6_msg,
3400 [CPL_RX_PKT] = rx_pkt
3401};
3402
3403static void process_timeout(struct c4iw_ep *ep)
3404{
3405 struct c4iw_qp_attributes attrs;
3406 int abort = 1;
3407
3408 mutex_lock(&ep->com.mutex);
3409 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
3410 ep->com.state);
3411 set_bit(TIMEDOUT, &ep->com.history);
3412 switch (ep->com.state) {
3413 case MPA_REQ_SENT:
3414 __state_set(&ep->com, ABORTING);
3415 connect_reply_upcall(ep, -ETIMEDOUT);
3416 break;
3417 case MPA_REQ_WAIT:
3418 __state_set(&ep->com, ABORTING);
3419 break;
3420 case CLOSING:
3421 case MORIBUND:
3422 if (ep->com.cm_id && ep->com.qp) {
3423 attrs.next_state = C4IW_QP_STATE_ERROR;
3424 c4iw_modify_qp(ep->com.qp->rhp,
3425 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
3426 &attrs, 1);
3427 }
3428 __state_set(&ep->com, ABORTING);
3429 break;
3430 default:
3431 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
3432 __func__, ep, ep->hwtid, ep->com.state);
3433 abort = 0;
3434 }
3435 mutex_unlock(&ep->com.mutex);
3436 if (abort)
3437 abort_connection(ep, NULL, GFP_KERNEL);
3438 c4iw_put_ep(&ep->com);
3439}
3440
3441static void process_timedout_eps(void)
3442{
3443 struct c4iw_ep *ep;
3444
3445 spin_lock_irq(&timeout_lock);
3446 while (!list_empty(&timeout_list)) {
3447 struct list_head *tmp;
3448
3449 tmp = timeout_list.next;
3450 list_del(tmp);
3451 spin_unlock_irq(&timeout_lock);
3452 ep = list_entry(tmp, struct c4iw_ep, entry);
3453 process_timeout(ep);
3454 spin_lock_irq(&timeout_lock);
3455 }
3456 spin_unlock_irq(&timeout_lock);
3457}
3458
3459static void process_work(struct work_struct *work)
3460{
3461 struct sk_buff *skb = NULL;
3462 struct c4iw_dev *dev;
3463 struct cpl_act_establish *rpl;
3464 unsigned int opcode;
3465 int ret;
3466
3467 while ((skb = skb_dequeue(&rxq))) {
3468 rpl = cplhdr(skb);
3469 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3470 opcode = rpl->ot.opcode;
3471
3472 BUG_ON(!work_handlers[opcode]);
3473 ret = work_handlers[opcode](dev, skb);
3474 if (!ret)
3475 kfree_skb(skb);
3476 }
3477 process_timedout_eps();
3478}
3479
3480static DECLARE_WORK(skb_work, process_work);
3481
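/*
 * Endpoint timer callback: mark the endpoint timed out and queue it
 * on the timeout list for process_timedout_eps() to handle in work
 * queue context.
 */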
3482static void ep_timeout(unsigned long arg)
3483{
3484 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
3485 int kickit = 0;
3486
3487 spin_lock(&timeout_lock);
3488 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
3489 list_add_tail(&ep->entry, &timeout_list);
3490 kickit = 1;
3491 }
3492 spin_unlock(&timeout_lock);
3493 if (kickit)
3494 queue_work(workq, &skb_work);
3495}
3496
3497/*
3498 * All the CM events are handled on a work queue so that they run in a safe context.
3499 */
3500static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
3501{
3502
3503 /*
3504 * Save dev in the skb->cb area.
3505 */
3506 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
3507
3508 /*
3509 * Queue the skb and schedule the worker thread.
3510 */
3511 skb_queue_tail(&rxq, skb);
3512 queue_work(workq, &skb_work);
3513 return 0;
3514}
3515
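/*
 * SET_TCB_RPL completions need no deferred work: log an unexpected status
 * and free the skb.
 */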
3516static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
3517{
3518 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
3519
3520 if (rpl->status != CPL_ERR_NONE) {
3521 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
3522 "for tid %u\n", rpl->status, GET_TID(rpl));
3523 }
3524	kfree_skb(skb);
3525 return 0;
3526}
3527
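/*
 * FW6_MSG replies are demultiplexed by type: WR replies wake the thread
 * blocked on the c4iw_wr_wait object whose pointer is carried back in the
 * message payload, while CQE and offload-connection-WR replies are deferred
 * to the workqueue via sched().
 */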
3528static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3529{
3530 struct cpl_fw6_msg *rpl = cplhdr(skb);
3531 struct c4iw_wr_wait *wr_waitp;
3532 int ret;
3533
3534 PDBG("%s type %u\n", __func__, rpl->type);
3535
3536 switch (rpl->type) {
3537	case FW6_TYPE_WR_RPL:
3538		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
3539		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
3540		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
3541 if (wr_waitp)
3542 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
3543		kfree_skb(skb);
3544		break;
3545	case FW6_TYPE_CQE:
3546	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3547		sched(dev, skb);
3548		break;
3549 default:
3550 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
3551 rpl->type);
3552		kfree_skb(skb);
3553 break;
3554 }
3555 return 0;
3556}
3557
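/*
 * Called directly from the upcall path rather than through sched(), so that
 * anyone blocked in rdma_init()/rdma_fini() is woken with -ECONNRESET before
 * the abort itself is deferred to the workqueue.  Negative advice aborts and
 * aborts for unknown tids are dropped here.
 */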
3558static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
3559{
3560 struct cpl_abort_req_rss *req = cplhdr(skb);
3561 struct c4iw_ep *ep;
3562 struct tid_info *t = dev->rdev.lldi.tids;
3563 unsigned int tid = GET_TID(req);
3564
3565 ep = lookup_tid(t, tid);
3566 if (!ep) {
3567 printk(KERN_WARNING MOD
3568 "Abort on non-existent endpoint, tid %d\n", tid);
3569 kfree_skb(skb);
3570 return 0;
3571 }
3572 if (is_neg_adv_abort(req->status)) {
3573 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
3574 ep->hwtid);
3575 kfree_skb(skb);
3576 return 0;
3577 }
3578 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
3579 ep->com.state);
3580
3581 /*
3582 * Wake up any threads in rdma_init() or rdma_fini().
3583	 * However, if we are on MPAv2 and want to retry with MPAv1,
3584	 * then don't wake up yet.
3585	 */
3586 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
3587 if (ep->com.state != MPA_REQ_SENT)
3588 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
3589 } else
3590 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
3591 sched(dev, skb);
3592 return 0;
3593}
3594
3595/*
3596 * Most upcalls from the T4 Core go to sched() to
3597 * schedule the processing on a work queue.
3598 */
3599c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
3600 [CPL_ACT_ESTABLISH] = sched,
3601 [CPL_ACT_OPEN_RPL] = sched,
3602 [CPL_RX_DATA] = sched,
3603 [CPL_ABORT_RPL_RSS] = sched,
3604 [CPL_ABORT_RPL] = sched,
3605 [CPL_PASS_OPEN_RPL] = sched,
3606 [CPL_CLOSE_LISTSRV_RPL] = sched,
3607 [CPL_PASS_ACCEPT_REQ] = sched,
3608 [CPL_PASS_ESTABLISH] = sched,
3609 [CPL_PEER_CLOSE] = sched,
3610 [CPL_CLOSE_CON_RPL] = sched,
3611	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
3612 [CPL_RDMA_TERMINATE] = sched,
3613 [CPL_FW4_ACK] = sched,
3614 [CPL_SET_TCB_RPL] = set_tcb_rpl,
3615 [CPL_FW6_MSG] = fw6_msg,
3616 [CPL_RX_PKT] = sched
3617};
3618
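/*
 * Module init: set up the timeout-list lock, the deferred-skb queue and the
 * single-threaded workqueue that runs all CM event handlers.
 */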
3619int __init c4iw_cm_init(void)
3620{
3621	spin_lock_init(&timeout_lock);
3622 skb_queue_head_init(&rxq);
3623
3624 workq = create_singlethread_workqueue("iw_cxgb4");
3625 if (!workq)
3626 return -ENOMEM;
3627
3628 return 0;
3629}
3630
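/*
 * Module exit: all endpoints should be gone by now, so the timeout list must
 * be empty; flush and destroy the workqueue.
 */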
3631void __exit c4iw_cm_term(void)
3632{
3633	WARN_ON(!list_empty(&timeout_list));
3634 flush_workqueue(workq);
3635 destroy_workqueue(workq);
3636}