/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p\n", ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	add_timer(&ep->timer);
}

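/*
 * Stop the ep timer and drop the reference it held. Returns 0 if the
 * timer was stopped before it fired; returns 1 if the TIMEOUT flag was
 * already set (the timer has already fired or been stopped), in which
 * case no reference is put.
 */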
static int stop_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p stopping\n", ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

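/*
 * Send an skb through the given L2T entry, dropping it (and returning
 * -EIO) if the device is in a fatal error state. A NET_XMIT_DROP from
 * the lower layer is mapped to -ENOMEM; other congestion codes are
 * treated as success.
 */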
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);

	skb = get_skb(skb, len, GFP_KERNEL);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, hwtid, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

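/*
 * Derive the effective MSS from the MTU index negotiated in the TCP
 * options: subtract IP/TCP header overhead (and timestamp option space
 * if present), then clamp to a 128-byte minimum.
 */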
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
		 ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	pr_debug("%s -> %s\n", states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

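/*
 * Pre-allocate the skbs needed to tear the connection down later, so
 * that teardown cannot fail on memory allocation. The skbs sit on
 * ep_skb_list and are dequeued by send_flowc(), send_halfclose() and
 * send_abort().
 */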
static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
		if (!epc->wr_waitp) {
			kfree(epc);
			epc = NULL;
			goto out;
		}
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(epc->wr_waitp);
	}
	pr_debug("alloc ep %p\n", epc);
out:
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	if (idr_is_empty(&ep->com.dev->hwtid_idr))
		wake_up(&ep->com.dev->wait);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->stid_idr, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
				 ep->com.local_addr.ss_family);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		if (ep->mpa_skb)
			kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	c4iw_put_wr_wait(ep->com.wr_waitp);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint. Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

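/*
 * Software-defined CPL opcodes, numbered just past the hardware CPL
 * range (NUM_CPL_CMDS), used to route deferred endpoint release through
 * sched(); see the comment above queue_arp_failure_cpl() below.
 */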
enum {
	NUM_FAKE_CPLS = 2,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	kfree_skb(skb);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	kfree_skb(skb);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources. This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("rdev %p\n", rdev);
	req->cmd = CPL_ABORT_NO_RST;
	skb_get(skb);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	} else
		kfree_skb(skb);
}

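/*
 * Post the firmware FLOWC work request that binds this connection's tx
 * channel, port, ingress queue, sequence numbers, send buffer and MSS
 * to the hardware tid; a ninth mnemonic carries the VLAN priority as a
 * scheduling class when the L2T entry is tagged.
 */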
static int send_flowc(struct c4iw_ep *ep)
{
	struct fw_flowc_wr *flowc;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	int i;
	u16 vlan = ep->l2t->vlan;
	int nparams;

	if (WARN_ON(!skb))
		return -ENOMEM;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 8;
	else
		nparams = 9;

	flowc = __skb_put(skb, FLOWC_LEN);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	if (nparams == 9) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[8].val = cpu_to_be32(pri);
	} else {
		/* Pad WR to 16 byte boundary */
		flowc->mnemval[8].mnemonic = 0;
		flowc->mnemval[8].val = 0;
	}
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

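/*
 * Send a half-close (CPL_CLOSE_CON_REQ) using one of the skbs
 * pre-allocated on ep_skb_list.
 */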
static int send_halfclose(struct c4iw_ep *ep)
{
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
			      NULL, arp_failure_discard);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep)
{
	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
			  ep, abort_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}

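/*
 * Build and send the active open request. The work request layout
 * differs by chip revision (T4/T5/T6) and by address family, so the
 * matching cpl_act_open_req variant is selected at run time.
 */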
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	u32 wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;
	struct net_device *netdev;
	u64 params;

	netdev = ep->com.dev->rdev.lldi.ports[0];

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	pr_debug("ep %p atid %u\n", ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	params = cxgb4_select_ntuple(netdev, ep->l2t);

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = skb_put(skb, wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = skb_put(skb, wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(params);
			req->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t5req->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t5req->rsvd);
				t5req->opt2 = cpu_to_be32(opt2);
			} else {
				t6req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t6req->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t6req->rsvd);
				t6req->opt2 = cpu_to_be32(opt2);
			}
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = skb_put(skb, wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = skb_put(skb, wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid << 14) | ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
								      ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req6->params =
					   cpu_to_be64(FILTER_TUPLE_V(params));
				t5req6->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t5req6->rsvd);
				t5req6->opt2 = cpu_to_be32(opt2);
			} else {
				t6req6->params =
					   cpu_to_be64(FILTER_TUPLE_V(params));
				t6req6->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t6req6->rsvd);
				t6req6->opt2 = cpu_to_be32(opt2);
			}
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}

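/*
 * Send the MPA start request as streaming-mode payload. For MPA v2 the
 * private data is prefixed with the enhanced-connection parameters
 * (IRD/ORD and the peer2peer RTR opcode).
 */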
static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));

	mpa->flags = 0;
	if (crc_enabled)
		mpa->flags |= MPA_CRC;
	if (markers_enabled) {
		mpa->flags |= MPA_MARKERS;
		ep->mpa_attr.recv_marker_enabled = 1;
	} else {
		ep->mpa_attr.recv_marker_enabled = 0;
	}
	if (mpa_rev_to_use == 2)
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;

	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		pr_debug("initiator ird %u ord %u\n", ep->ird,
			 ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					   (p2p_type ==
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					    MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					    FW_RI_INIT_P2PTYPE_READ_REQ ?
					    MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = 0;
	if (ep->mpa_attr.crc_enabled)
		mpa->flags |= MPA_CRC;
	if (ep->mpa_attr.recv_marker_enabled)
		mpa->flags |= MPA_MARKERS;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

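/*
 * The active open is established: record the hwtid and initial
 * sequence numbers, release the atid, then start MPA negotiation by
 * sending the FLOWC work request followed by the MPA start request.
 */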
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

	pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
	insert_ep_tid(ep);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

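/*
 * Upcall helpers: these deliver CM events (close, disconnect, abort,
 * connect reply/request, established) to the iWARP CM via the cm_id
 * event handler, and record each upcall in the ep history bitmap.
 */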
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
			 ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u status %d\n",
		 ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	pr_debug("ep %p tid %u status %d\n", ep,
		 ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		pr_debug("ep %p tid %u\n", ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

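/*
 * Return rx credits to the hardware via CPL_RX_DATA_ACK, folding back
 * in any receive-window overage that could not be expressed in opt0's
 * RCV_BUFSIZ field at connection setup.
 */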
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct sk_buff *skb;
	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	pr_debug("ep %p tid %u credits %u\n",
		 ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
		      RX_DACK_MODE_V(dack_mode);

	cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
			    credit_dack);

	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err_stop_timer;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
		       __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * Stop mpa timer. If it expired, then
	 * we ignore the MPA reply. process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
				 resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
		 ep->mpa_attr.crc_enabled,
		 ep->mpa_attr.recv_marker_enabled,
		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
		 ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		pr_err("%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}

/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;

	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
		       __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ird = min_t(u32, ep->ird,
					cur_max_read_depth(ep->com.dev));
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = min_t(u32, ep->ord,
					cur_max_read_depth(ep->com.dev));
			pr_debug("initiator ird %u ord %u\n",
				 ep->ird, ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
		 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1778 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1779 ep->mpa_attr.p2p_type);
cfdda9d7 1780
e4b76a2a
H
1781 __state_set(&ep->com, MPA_REQ_RCVD);
1782
1783 /* drive upcall */
1784 mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
1785 if (ep->parent_ep->com.state != DEAD) {
1786 if (connect_request_upcall(ep))
fd6aabe4 1787 goto err_unlock_parent;
e4b76a2a
H
1788 } else {
1789 goto err_unlock_parent;
be13b2df 1790 }
e4b76a2a 1791 mutex_unlock(&ep->parent_ep->com.mutex);
fd6aabe4
H
1792 return 0;
1793
1794err_unlock_parent:
1795 mutex_unlock(&ep->parent_ep->com.mutex);
1796 goto err_out;
1797err_stop_timer:
1798 (void)stop_ep_timer(ep);
1799err_out:
1800 return 2;
cfdda9d7
SW
1801}
1802
1803static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1804{
1805 struct c4iw_ep *ep;
1806 struct cpl_rx_data *hdr = cplhdr(skb);
1807 unsigned int dlen = ntohs(hdr->len);
1808 unsigned int tid = GET_TID(hdr);
793dad94 1809 __u8 status = hdr->status;
cc18b939 1810 int disconnect = 0;
cfdda9d7 1811
944661dd 1812 ep = get_ep_from_tid(dev, tid);
977116c6
SW
1813 if (!ep)
1814 return 0;
548ddb19 1815 pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
cfdda9d7
SW
1816 skb_pull(skb, sizeof(*hdr));
1817 skb_trim(skb, dlen);
c529fb50 1818 mutex_lock(&ep->com.mutex);
cfdda9d7 1819
c529fb50 1820 switch (ep->com.state) {
cfdda9d7 1821 case MPA_REQ_SENT:
3bcf96e0 1822 update_rx_credits(ep, dlen);
55abf8df 1823 ep->rcv_seq += dlen;
cc18b939 1824 disconnect = process_mpa_reply(ep, skb);
cfdda9d7
SW
1825 break;
1826 case MPA_REQ_WAIT:
3bcf96e0 1827 update_rx_credits(ep, dlen);
55abf8df 1828 ep->rcv_seq += dlen;
4a4dd8db 1829 disconnect = process_mpa_request(ep, skb);
cfdda9d7 1830 break;
1557967b
VP
1831 case FPDU_MODE: {
1832 struct c4iw_qp_attributes attrs;
3bcf96e0
SW
1833
1834 update_rx_credits(ep, dlen);
e8e5b927 1835 if (status)
1557967b 1836 pr_err("%s Unexpected streaming data." \
04236df2
VP
1837 " qpid %u ep %p state %d tid %u status %d\n",
1838 __func__, ep->com.qp->wq.sq.qid, ep,
c529fb50 1839 ep->com.state, ep->hwtid, status);
97d7ec0c 1840 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1557967b 1841 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
cc18b939
SW
1842 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1843 disconnect = 1;
cfdda9d7
SW
1844 break;
1845 }
1557967b
VP
1846 default:
1847 break;
1848 }
c529fb50 1849 mutex_unlock(&ep->com.mutex);
cc18b939 1850 if (disconnect)
4a4dd8db 1851 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
944661dd 1852 c4iw_put_ep(&ep->com);
cfdda9d7
SW
1853 return 0;
1854}
1855
1856static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1857{
1858 struct c4iw_ep *ep;
1859 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
cfdda9d7
SW
1860 int release = 0;
1861 unsigned int tid = GET_TID(rpl);
cfdda9d7 1862
944661dd 1863 ep = get_ep_from_tid(dev, tid);
4984037b 1864 if (!ep) {
700456bd 1865 pr_warn("Abort rpl to freed endpoint\n");
4984037b
VP
1866 return 0;
1867 }
548ddb19 1868 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2f5b48c3 1869 mutex_lock(&ep->com.mutex);
cfdda9d7
SW
1870 switch (ep->com.state) {
1871 case ABORTING:
2015f26c 1872 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
cfdda9d7
SW
1873 __state_set(&ep->com, DEAD);
1874 release = 1;
1875 break;
1876 default:
700456bd 1877 pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
cfdda9d7
SW
1878 break;
1879 }
2f5b48c3 1880 mutex_unlock(&ep->com.mutex);
cfdda9d7
SW
1881
1882 if (release)
1883 release_ep_resources(ep);
944661dd 1884 c4iw_put_ep(&ep->com);
cfdda9d7
SW
1885 return 0;
1886}
1887
caa6c9f2 1888static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
5be78ee9
VP
1889{
1890 struct sk_buff *skb;
1891 struct fw_ofld_connection_wr *req;
1892 unsigned int mtu_idx;
cc516700 1893 u32 wscale;
830662f6 1894 struct sockaddr_in *sin;
b408ff28 1895 int win;
5be78ee9
VP
1896
1897 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
de77b966 1898 req = __skb_put_zero(skb, sizeof(*req));
6c53e938 1899 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
e2ac9628 1900 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
41b4f86c
KS
1901 req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1902 ep->com.dev->rdev.lldi.ports[0],
5be78ee9 1903 ep->l2t));
170003c8 1904 sin = (struct sockaddr_in *)&ep->com.local_addr;
830662f6
VP
1905 req->le.lport = sin->sin_port;
1906 req->le.u.ipv4.lip = sin->sin_addr.s_addr;
170003c8 1907 sin = (struct sockaddr_in *)&ep->com.remote_addr;
830662f6
VP
1908 req->le.pport = sin->sin_port;
1909 req->le.u.ipv4.pip = sin->sin_addr.s_addr;
5be78ee9 1910 req->tcb.t_state_to_astid =
77a80e23
HS
1911 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
1912 FW_OFLD_CONNECTION_WR_ASTID_V(atid));
5be78ee9 1913 req->tcb.cplrxdataack_cplpassacceptrpl =
77a80e23 1914 htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
ef5d6355 1915 req->tcb.tx_max = (__force __be32) jiffies;
793dad94 1916 req->tcb.rcv_adv = htons(1);
44c6d069
VP
1917 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
1918 enable_tcp_timestamps,
1919 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
cc516700 1920 wscale = cxgb_compute_wscale(rcv_win);
b408ff28
HS
1921
1922 /*
1923 * Specify the largest window that will fit in opt0. The
1924 * remainder will be specified in the rx_data_ack.
1925 */
1926 win = ep->rcv_win >> 10;
d7990b0c
AB
1927 if (win > RCV_BUFSIZ_M)
1928 win = RCV_BUFSIZ_M;
b408ff28 1929
6c53e938
HS
1930 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
1931 (nocong ? NO_CONG_F : 0) |
d7990b0c 1932 KEEP_ALIVE_F |
6c53e938 1933 DELACK_F |
d7990b0c
AB
1934 WND_SCALE_V(wscale) |
1935 MSS_IDX_V(mtu_idx) |
1936 L2T_IDX_V(ep->l2t->idx) |
1937 TX_CHAN_V(ep->tx_chan) |
1938 SMAC_SEL_V(ep->smac_idx) |
ac8e4c69 1939 DSCP_V(ep->tos >> 2) |
d7990b0c
AB
1940 ULP_MODE_V(ULP_MODE_TCPDDP) |
1941 RCV_BUFSIZ_V(win));
6c53e938
HS
1942 req->tcb.opt2 = (__force __be32) (PACE_V(1) |
1943 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
d7990b0c 1944 RX_CHANNEL_V(0) |
6c53e938 1945 CCTRL_ECN_V(enable_ecn) |
d7990b0c 1946 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
5be78ee9 1947 if (enable_tcp_timestamps)
6c53e938 1948 req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
5be78ee9 1949 if (enable_tcp_sack)
6c53e938 1950 req->tcb.opt2 |= (__force __be32)SACK_EN_F;
5be78ee9 1951 if (wscale && enable_tcp_window_scaling)
d7990b0c
AB
1952 req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
1953 req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
1954 req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
793dad94
VP
1955 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1956 set_bit(ACT_OFLD_CONN, &ep->com.history);
caa6c9f2 1957 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
5be78ee9
VP
1958}
1959
cfdda9d7 1960/*
4c72efef
H
1961 * Some of the error codes above implicitly indicate that there is no TID
1962 * allocated with the result of an ACT_OPEN. We use this predicate to make
1963 * that explicit.
cfdda9d7
SW
1964 */
1965static inline int act_open_has_tid(int status)
1966{
4c72efef
H
1967 return (status != CPL_ERR_TCAM_PARITY &&
1968 status != CPL_ERR_TCAM_MISS &&
1969 status != CPL_ERR_TCAM_FULL &&
1970 status != CPL_ERR_CONN_EXIST_SYNRECV &&
1971 status != CPL_ERR_CONN_EXIST);
cfdda9d7
SW
1972}
1973
dd92b124
HS
1974static char *neg_adv_str(unsigned int status)
1975{
1976 switch (status) {
1977 case CPL_ERR_RTX_NEG_ADVICE:
1978 return "Retransmit timeout";
1979 case CPL_ERR_PERSIST_NEG_ADVICE:
1980 return "Persist timeout";
1981 case CPL_ERR_KEEPALV_NEG_ADVICE:
1982 return "Keepalive timeout";
1983 default:
1984 return "Unknown";
1985 }
1986}
1987
b408ff28
HS
1988static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
1989{
1990 ep->snd_win = snd_win;
1991 ep->rcv_win = rcv_win;
548ddb19
BP
1992 pr_debug("snd_win %d rcv_win %d\n",
1993 ep->snd_win, ep->rcv_win);
b408ff28
HS
1994}
1995
793dad94
VP
1996#define ACT_OPEN_RETRY_COUNT 2
1997
830662f6
VP
1998static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
1999 struct dst_entry *dst, struct c4iw_dev *cdev,
ac8e4c69 2000 bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
830662f6
VP
2001{
2002 struct neighbour *n;
2003 int err, step;
2004 struct net_device *pdev;
2005
2006 n = dst_neigh_lookup(dst, peer_ip);
2007 if (!n)
2008 return -ENODEV;
2009
2010 rcu_read_lock();
2011 err = -ENOMEM;
2012 if (n->dev->flags & IFF_LOOPBACK) {
2013 if (iptype == 4)
2014 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
2015 else if (IS_ENABLED(CONFIG_IPV6))
2016 for_each_netdev(&init_net, pdev) {
2017 if (ipv6_chk_addr(&init_net,
2018 (struct in6_addr *)peer_ip,
2019 pdev, 1))
2020 break;
2021 }
2022 else
2023 pdev = NULL;
2024
2025 if (!pdev) {
2026 err = -ENODEV;
2027 goto out;
2028 }
2029 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
ac8e4c69 2030 n, pdev, rt_tos2priority(tos));
609e941a
SW
2031 if (!ep->l2t) {
2032 dev_put(pdev);
830662f6 2033 goto out;
609e941a 2034 }
830662f6
VP
2035 ep->mtu = pdev->mtu;
2036 ep->tx_chan = cxgb4_port_chan(pdev);
963cab50
H
2037 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
2038 cxgb4_port_viid(pdev));
830662f6
VP
2039 step = cdev->rdev.lldi.ntxq /
2040 cdev->rdev.lldi.nchan;
2041 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2042 step = cdev->rdev.lldi.nrxq /
2043 cdev->rdev.lldi.nchan;
2044 ep->ctrlq_idx = cxgb4_port_idx(pdev);
2045 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2046 cxgb4_port_idx(pdev) * step];
b408ff28 2047 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
830662f6
VP
2048 dev_put(pdev);
2049 } else {
2050 pdev = get_real_dev(n->dev);
2051 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2052 n, pdev, 0);
2053 if (!ep->l2t)
2054 goto out;
2055 ep->mtu = dst_mtu(dst);
11b8e22d 2056 ep->tx_chan = cxgb4_port_chan(pdev);
963cab50
H
2057 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
2058 cxgb4_port_viid(pdev));
830662f6
VP
2059 step = cdev->rdev.lldi.ntxq /
2060 cdev->rdev.lldi.nchan;
11b8e22d
SW
2061 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2062 ep->ctrlq_idx = cxgb4_port_idx(pdev);
830662f6
VP
2063 step = cdev->rdev.lldi.nrxq /
2064 cdev->rdev.lldi.nchan;
2065 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
11b8e22d 2066 cxgb4_port_idx(pdev) * step];
b408ff28 2067 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
830662f6
VP
2068
2069 if (clear_mpa_v1) {
2070 ep->retry_with_mpa_v1 = 0;
2071 ep->tried_with_mpa_v1 = 0;
2072 }
2073 }
2074 err = 0;
2075out:
2076 rcu_read_unlock();
2077
2078 neigh_release(n);
2079
2080 return err;
2081}
2082
793dad94
VP
2083static int c4iw_reconnect(struct c4iw_ep *ep)
2084{
2085 int err = 0;
4a740838 2086 int size = 0;
24d44a39 2087 struct sockaddr_in *laddr = (struct sockaddr_in *)
170003c8 2088 &ep->com.cm_id->m_local_addr;
24d44a39 2089 struct sockaddr_in *raddr = (struct sockaddr_in *)
170003c8 2090 &ep->com.cm_id->m_remote_addr;
830662f6 2091 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
170003c8 2092 &ep->com.cm_id->m_local_addr;
830662f6 2093 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
170003c8 2094 &ep->com.cm_id->m_remote_addr;
830662f6
VP
2095 int iptype;
2096 __u8 *ra;
793dad94 2097
548ddb19 2098 pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
ef885dc6 2099 c4iw_init_wr_wait(ep->com.wr_waitp);
793dad94 2100
4a740838
H
2101 /* When MPA revision is different on nodes, the node with MPA_rev=2
2102 * tries to reconnect with MPA_rev 1 for the same EP through
2103 * c4iw_reconnect(), where the same EP is assigned with new tid for
2104 * further connection establishment. As we are using the same EP pointer
2105 * for reconnect, few skbs are used during the previous c4iw_connect(),
2106 * which leaves the EP with inadequate skbs for further
ba97b749 2107 * c4iw_reconnect(), Further causing a crash due to an empty
4a740838
H
2108 * skb_list() during peer_abort(). Allocate skbs which is already used.
2109 */
2110 size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
2111 if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
2112 err = -ENOMEM;
2113 goto fail1;
2114 }
2115
793dad94
VP
2116 /*
2117 * Allocate an active TID to initiate a TCP connection.
2118 */
2119 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
2120 if (ep->atid == -1) {
700456bd 2121 pr_err("%s - cannot alloc atid\n", __func__);
793dad94
VP
2122 err = -ENOMEM;
2123 goto fail2;
2124 }
2125 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
2126
2127 /* find a route */
170003c8 2128 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
804c2f3e
VP
2129 ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
2130 laddr->sin_addr.s_addr,
2131 raddr->sin_addr.s_addr,
2132 laddr->sin_port,
2133 raddr->sin_port, ep->com.cm_id->tos);
830662f6
VP
2134 iptype = 4;
2135 ra = (__u8 *)&raddr->sin_addr;
2136 } else {
95554761
VP
2137 ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
2138 get_real_dev,
2139 laddr6->sin6_addr.s6_addr,
2140 raddr6->sin6_addr.s6_addr,
2141 laddr6->sin6_port,
2142 raddr6->sin6_port, 0,
2143 raddr6->sin6_scope_id);
830662f6
VP
2144 iptype = 6;
2145 ra = (__u8 *)&raddr6->sin6_addr;
2146 }
2147 if (!ep->dst) {
700456bd 2148 pr_err("%s - cannot find route\n", __func__);
793dad94
VP
2149 err = -EHOSTUNREACH;
2150 goto fail3;
2151 }
963cab50 2152 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
ac8e4c69
H
2153 ep->com.dev->rdev.lldi.adapter_type,
2154 ep->com.cm_id->tos);
830662f6 2155 if (err) {
700456bd 2156 pr_err("%s - cannot alloc l2e\n", __func__);
793dad94
VP
2157 goto fail4;
2158 }
2159
548ddb19
BP
2160 pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2161 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
a9a42886 2162 ep->l2t->idx);
793dad94
VP
2163
2164 state_set(&ep->com, CONNECTING);
ac8e4c69 2165 ep->tos = ep->com.cm_id->tos;
793dad94
VP
2166
2167 /* send connect request to rnic */
2168 err = send_connect(ep);
2169 if (!err)
2170 goto out;
2171
2172 cxgb4_l2t_release(ep->l2t);
2173fail4:
2174 dst_release(ep->dst);
2175fail3:
2176 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
2177 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2178fail2:
2179 /*
2180 * remember to send notification to upper layer.
2181 * We are in here so the upper layer is not aware that this is
2182 * re-connect attempt and so, upper layer is still waiting for
2183 * response of 1st connect request.
2184 */
2185 connect_reply_upcall(ep, -ECONNRESET);
4a740838 2186fail1:
793dad94
VP
2187 c4iw_put_ep(&ep->com);
2188out:
2189 return err;
2190}
2191
cfdda9d7
SW
2192static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2193{
2194 struct c4iw_ep *ep;
2195 struct cpl_act_open_rpl *rpl = cplhdr(skb);
6c53e938
HS
2196 unsigned int atid = TID_TID_G(AOPEN_ATID_G(
2197 ntohl(rpl->atid_status)));
cfdda9d7 2198 struct tid_info *t = dev->rdev.lldi.tids;
6c53e938 2199 int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
830662f6
VP
2200 struct sockaddr_in *la;
2201 struct sockaddr_in *ra;
2202 struct sockaddr_in6 *la6;
2203 struct sockaddr_in6 *ra6;
caa6c9f2 2204 int ret = 0;
cfdda9d7
SW
2205
2206 ep = lookup_atid(t, atid);
170003c8
SW
2207 la = (struct sockaddr_in *)&ep->com.local_addr;
2208 ra = (struct sockaddr_in *)&ep->com.remote_addr;
2209 la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
2210 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
cfdda9d7 2211
548ddb19 2212 pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
a9a42886 2213 status, status2errno(status));
cfdda9d7 2214
b65eef0a 2215 if (cxgb_is_neg_adv(status)) {
548ddb19
BP
2216 pr_debug("Connection problems for atid %u status %u (%s)\n",
2217 atid, status, neg_adv_str(status));
179d03bb
H
2218 ep->stats.connect_neg_adv++;
2219 mutex_lock(&dev->rdev.stats.lock);
2220 dev->rdev.stats.neg_adv++;
2221 mutex_unlock(&dev->rdev.stats.lock);
cfdda9d7
SW
2222 return 0;
2223 }
2224
793dad94
VP
2225 set_bit(ACT_OPEN_RPL, &ep->com.history);
2226
d716a2a0
VP
2227 /*
2228 * Log interesting failures.
2229 */
2230 switch (status) {
2231 case CPL_ERR_CONN_RESET:
2232 case CPL_ERR_CONN_TIMEDOUT:
2233 break;
5be78ee9 2234 case CPL_ERR_TCAM_FULL:
830662f6 2235 mutex_lock(&dev->rdev.stats.lock);
3b174d94 2236 dev->rdev.stats.tcam_full++;
830662f6
VP
2237 mutex_unlock(&dev->rdev.stats.lock);
2238 if (ep->com.local_addr.ss_family == AF_INET &&
2239 dev->rdev.lldi.enable_fw_ofld_conn) {
caa6c9f2
H
2240 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
2241 ntohl(rpl->atid_status))));
2242 if (ret)
2243 goto fail;
793dad94
VP
2244 return 0;
2245 }
2246 break;
2247 case CPL_ERR_CONN_EXIST:
2248 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2249 set_bit(ACT_RETRY_INUSE, &ep->com.history);
84cc6ac6
H
2250 if (ep->com.remote_addr.ss_family == AF_INET6) {
2251 struct sockaddr_in6 *sin6 =
2252 (struct sockaddr_in6 *)
170003c8 2253 &ep->com.local_addr;
84cc6ac6
H
2254 cxgb4_clip_release(
2255 ep->com.dev->rdev.lldi.ports[0],
2256 (const u32 *)
2257 &sin6->sin6_addr.s6_addr, 1);
2258 }
793dad94
VP
2259 remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
2260 atid);
2261 cxgb4_free_atid(t, atid);
2262 dst_release(ep->dst);
2263 cxgb4_l2t_release(ep->l2t);
2264 c4iw_reconnect(ep);
2265 return 0;
2266 }
5be78ee9 2267 break;
d716a2a0 2268 default:
830662f6
VP
2269 if (ep->com.local_addr.ss_family == AF_INET) {
2270 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
2271 atid, status, status2errno(status),
2272 &la->sin_addr.s_addr, ntohs(la->sin_port),
2273 &ra->sin_addr.s_addr, ntohs(ra->sin_port));
2274 } else {
2275 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
2276 atid, status, status2errno(status),
2277 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
2278 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
2279 }
d716a2a0
VP
2280 break;
2281 }
2282
caa6c9f2 2283fail:
cfdda9d7
SW
2284 connect_reply_upcall(ep, status2errno(status));
2285 state_set(&ep->com, DEAD);
2286
84cc6ac6
H
2287 if (ep->com.remote_addr.ss_family == AF_INET6) {
2288 struct sockaddr_in6 *sin6 =
170003c8 2289 (struct sockaddr_in6 *)&ep->com.local_addr;
84cc6ac6
H
2290 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
2291 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2292 }
cfdda9d7 2293 if (status && act_open_has_tid(status))
1dec4cec
GG
2294 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
2295 ep->com.local_addr.ss_family);
cfdda9d7 2296
793dad94 2297 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
cfdda9d7
SW
2298 cxgb4_free_atid(t, atid);
2299 dst_release(ep->dst);
2300 cxgb4_l2t_release(ep->l2t);
2301 c4iw_put_ep(&ep->com);
2302
2303 return 0;
2304}
2305
2306static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2307{
2308 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
cfdda9d7 2309 unsigned int stid = GET_TID(rpl);
f86fac79 2310 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
cfdda9d7
SW
2311
2312 if (!ep) {
4d45b757 2313 pr_warn("%s stid %d lookup failure!\n", __func__, stid);
1cab775c 2314 goto out;
cfdda9d7 2315 }
548ddb19 2316 pr_debug("ep %p status %d error %d\n", ep,
a9a42886 2317 rpl->status, status2errno(rpl->status));
2015f26c 2318 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
f86fac79 2319 c4iw_put_ep(&ep->com);
1cab775c 2320out:
cfdda9d7
SW
2321 return 0;
2322}
2323
cfdda9d7
SW
2324static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2325{
2326 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
cfdda9d7 2327 unsigned int stid = GET_TID(rpl);
f86fac79 2328 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
cfdda9d7 2329
3c8415cc 2330 if (!ep) {
4d45b757 2331 pr_warn("%s stid %d lookup failure!\n", __func__, stid);
3c8415cc
SW
2332 goto out;
2333 }
548ddb19 2334 pr_debug("ep %p\n", ep);
2015f26c 2335 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
f86fac79 2336 c4iw_put_ep(&ep->com);
3c8415cc 2337out:
cfdda9d7
SW
2338 return 0;
2339}
2340
9dec900c
H
2341static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2342 struct cpl_pass_accept_req *req)
cfdda9d7
SW
2343{
2344 struct cpl_pass_accept_rpl *rpl;
2345 unsigned int mtu_idx;
2346 u64 opt0;
2347 u32 opt2;
cc516700 2348 u32 wscale;
92e7ae71 2349 struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
b408ff28 2350 int win;
963cab50 2351 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
cfdda9d7 2352
548ddb19 2353 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
92e7ae71 2354
cfdda9d7 2355 skb_get(skb);
92e7ae71 2356 rpl = cplhdr(skb);
963cab50 2357 if (!is_t4(adapter_type)) {
92e7ae71
HS
2358 skb_trim(skb, roundup(sizeof(*rpl5), 16));
2359 rpl5 = (void *)rpl;
2360 INIT_TP_WR(rpl5, ep->hwtid);
2361 } else {
2362 skb_trim(skb, sizeof(*rpl));
2363 INIT_TP_WR(rpl, ep->hwtid);
2364 }
2365 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2366 ep->hwtid));
2367
44c6d069
VP
2368 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2369 enable_tcp_timestamps && req->tcpopt.tstamp,
2370 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
cc516700 2371 wscale = cxgb_compute_wscale(rcv_win);
b408ff28
HS
2372
2373 /*
2374 * Specify the largest window that will fit in opt0. The
2375 * remainder will be specified in the rx_data_ack.
2376 */
2377 win = ep->rcv_win >> 10;
d7990b0c
AB
2378 if (win > RCV_BUFSIZ_M)
2379 win = RCV_BUFSIZ_M;
6c53e938 2380 opt0 = (nocong ? NO_CONG_F : 0) |
d7990b0c 2381 KEEP_ALIVE_F |
6c53e938 2382 DELACK_F |
d7990b0c
AB
2383 WND_SCALE_V(wscale) |
2384 MSS_IDX_V(mtu_idx) |
2385 L2T_IDX_V(ep->l2t->idx) |
2386 TX_CHAN_V(ep->tx_chan) |
2387 SMAC_SEL_V(ep->smac_idx) |
6c53e938 2388 DSCP_V(ep->tos >> 2) |
d7990b0c
AB
2389 ULP_MODE_V(ULP_MODE_TCPDDP) |
2390 RCV_BUFSIZ_V(win);
2391 opt2 = RX_CHANNEL_V(0) |
2392 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
cfdda9d7
SW
2393
2394 if (enable_tcp_timestamps && req->tcpopt.tstamp)
6c53e938 2395 opt2 |= TSTAMPS_EN_F;
cfdda9d7 2396 if (enable_tcp_sack && req->tcpopt.sack)
6c53e938 2397 opt2 |= SACK_EN_F;
cfdda9d7 2398 if (wscale && enable_tcp_window_scaling)
d7990b0c 2399 opt2 |= WND_SCALE_EN_F;
5be78ee9
VP
2400 if (enable_ecn) {
2401 const struct tcphdr *tcph;
2402 u32 hlen = ntohl(req->hdr_len);
2403
963cab50
H
2404 if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
2405 tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
2406 IP_HDR_LEN_G(hlen);
2407 else
2408 tcph = (const void *)(req + 1) +
2409 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
5be78ee9 2410 if (tcph->ece && tcph->cwr)
6c53e938 2411 opt2 |= CCTRL_ECN_V(1);
5be78ee9 2412 }
963cab50 2413 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
92e7ae71 2414 u32 isn = (prandom_u32() & ~7UL) - 1;
d7990b0c 2415 opt2 |= T5_OPT_2_VALID_F;
cf7fe64a 2416 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
0b741047 2417 opt2 |= T5_ISS_F;
92e7ae71
HS
2418 rpl5 = (void *)rpl;
2419 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2420 if (peer2peer)
2421 isn += 4;
2422 rpl5->iss = cpu_to_be32(isn);
548ddb19 2423 pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
92e5011a 2424 }
cfdda9d7 2425
cfdda9d7
SW
2426 rpl->opt0 = cpu_to_be64(opt0);
2427 rpl->opt2 = cpu_to_be32(opt2);
d4f1a5c6 2428 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
9dec900c 2429 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
cfdda9d7 2430
9dec900c 2431 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
cfdda9d7
SW
2432}
2433
830662f6 2434static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
cfdda9d7 2435{
548ddb19 2436 pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
cfdda9d7 2437 skb_trim(skb, sizeof(struct cpl_tid_release));
cfdda9d7
SW
2438 release_tid(&dev->rdev, hwtid, skb);
2439 return;
2440}
2441
cfdda9d7
SW
2442static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2443{
793dad94 2444 struct c4iw_ep *child_ep = NULL, *parent_ep;
cfdda9d7 2445 struct cpl_pass_accept_req *req = cplhdr(skb);
6c53e938 2446 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
cfdda9d7
SW
2447 struct tid_info *t = dev->rdev.lldi.tids;
2448 unsigned int hwtid = GET_TID(req);
2449 struct dst_entry *dst;
830662f6 2450 __u8 local_ip[16], peer_ip[16];
cfdda9d7 2451 __be16 local_port, peer_port;
84cc6ac6 2452 struct sockaddr_in6 *sin6;
3786cf18 2453 int err;
1cab775c 2454 u16 peer_mss = ntohs(req->tcpopt.mss);
830662f6 2455 int iptype;
92e7ae71 2456 unsigned short hdrs;
ac8e4c69 2457 u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
cfdda9d7 2458
f86fac79 2459 parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
1cab775c 2460 if (!parent_ep) {
4d45b757
BP
2461 pr_err("%s connect request on invalid stid %d\n",
2462 __func__, stid);
1cab775c
VP
2463 goto reject;
2464 }
1cab775c 2465
cfdda9d7 2466 if (state_read(&parent_ep->com) != LISTEN) {
4d45b757 2467 pr_err("%s - listening ep not in LISTEN\n", __func__);
cfdda9d7
SW
2468 goto reject;
2469 }
2470
85e42b04
VP
2471 cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
2472 &iptype, local_ip, peer_ip, &local_port, &peer_port);
830662f6 2473
cfdda9d7 2474 /* Find output route */
830662f6 2475 if (iptype == 4) {
548ddb19
BP
2476 pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2477 , parent_ep, hwtid,
a9a42886
JP
2478 local_ip, peer_ip, ntohs(local_port),
2479 ntohs(peer_port), peer_mss);
804c2f3e
VP
2480 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
2481 *(__be32 *)local_ip, *(__be32 *)peer_ip,
2482 local_port, peer_port, tos);
830662f6 2483 } else {
548ddb19
BP
2484 pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2485 , parent_ep, hwtid,
a9a42886
JP
2486 local_ip, peer_ip, ntohs(local_port),
2487 ntohs(peer_port), peer_mss);
95554761
VP
2488 dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
2489 local_ip, peer_ip, local_port, peer_port,
2490 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
2491 ((struct sockaddr_in6 *)
2492 &parent_ep->com.local_addr)->sin6_scope_id);
830662f6
VP
2493 }
2494 if (!dst) {
700456bd 2495 pr_err("%s - failed to find dst entry!\n", __func__);
cfdda9d7
SW
2496 goto reject;
2497 }
3786cf18
DM
2498
2499 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
2500 if (!child_ep) {
700456bd 2501 pr_err("%s - failed to allocate ep entry!\n", __func__);
cfdda9d7
SW
2502 dst_release(dst);
2503 goto reject;
2504 }
2505
963cab50 2506 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
ac8e4c69 2507 parent_ep->com.dev->rdev.lldi.adapter_type, tos);
3786cf18 2508 if (err) {
700456bd 2509 pr_err("%s - failed to allocate l2t entry!\n", __func__);
cfdda9d7 2510 dst_release(dst);
3786cf18 2511 kfree(child_ep);
cfdda9d7
SW
2512 goto reject;
2513 }
3786cf18 2514
98b80a2a
RR
2515 hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
2516 sizeof(struct tcphdr) +
92e7ae71
HS
2517 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2518 if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2519 child_ep->mtu = peer_mss + hdrs;
1cab775c 2520
4a740838
H
2521 skb_queue_head_init(&child_ep->com.ep_skb_list);
2522 if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
2523 goto fail;
2524
cfdda9d7
SW
2525 state_set(&child_ep->com, CONNECTING);
2526 child_ep->com.dev = dev;
2527 child_ep->com.cm_id = NULL;
5b6b8fe6 2528
830662f6
VP
2529 if (iptype == 4) {
2530 struct sockaddr_in *sin = (struct sockaddr_in *)
170003c8 2531 &child_ep->com.local_addr;
5b6b8fe6 2532
b462b06e 2533 sin->sin_family = AF_INET;
830662f6
VP
2534 sin->sin_port = local_port;
2535 sin->sin_addr.s_addr = *(__be32 *)local_ip;
5b6b8fe6
SW
2536
2537 sin = (struct sockaddr_in *)&child_ep->com.local_addr;
b462b06e 2538 sin->sin_family = AF_INET;
5b6b8fe6
SW
2539 sin->sin_port = ((struct sockaddr_in *)
2540 &parent_ep->com.local_addr)->sin_port;
2541 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2542
170003c8 2543 sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
b462b06e 2544 sin->sin_family = AF_INET;
830662f6
VP
2545 sin->sin_port = peer_port;
2546 sin->sin_addr.s_addr = *(__be32 *)peer_ip;
2547 } else {
170003c8 2548 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
830662f6
VP
2549 sin6->sin6_family = PF_INET6;
2550 sin6->sin6_port = local_port;
2551 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
5b6b8fe6
SW
2552
2553 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2554 sin6->sin6_family = PF_INET6;
2555 sin6->sin6_port = ((struct sockaddr_in6 *)
2556 &parent_ep->com.local_addr)->sin6_port;
2557 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2558
170003c8 2559 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
830662f6
VP
2560 sin6->sin6_family = PF_INET6;
2561 sin6->sin6_port = peer_port;
2562 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2563 }
5b6b8fe6 2564
cfdda9d7
SW
2565 c4iw_get_ep(&parent_ep->com);
2566 child_ep->parent_ep = parent_ep;
ac8e4c69 2567 child_ep->tos = tos;
cfdda9d7
SW
2568 child_ep->dst = dst;
2569 child_ep->hwtid = hwtid;
cfdda9d7 2570
548ddb19 2571 pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
a9a42886 2572 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
cfdda9d7 2573
a9346abe 2574 timer_setup(&child_ep->timer, ep_timeout, 0);
1dec4cec
GG
2575 cxgb4_insert_tid(t, child_ep, hwtid,
2576 child_ep->com.local_addr.ss_family);
944661dd 2577 insert_ep_tid(child_ep);
9dec900c
H
2578 if (accept_cr(child_ep, skb, req)) {
2579 c4iw_put_ep(&parent_ep->com);
2580 release_ep_resources(child_ep);
2581 } else {
2582 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
2583 }
84cc6ac6 2584 if (iptype == 6) {
170003c8 2585 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
84cc6ac6
H
2586 cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
2587 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2588 }
cfdda9d7 2589 goto out;
4a740838
H
2590fail:
2591 c4iw_put_ep(&child_ep->com);
cfdda9d7 2592reject:
830662f6 2593 reject_cr(dev, hwtid, skb);
3d318605 2594out:
f86fac79
H
2595 if (parent_ep)
2596 c4iw_put_ep(&parent_ep->com);
cfdda9d7
SW
2597 return 0;
2598}
2599
2600static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2601{
2602 struct c4iw_ep *ep;
2603 struct cpl_pass_establish *req = cplhdr(skb);
cfdda9d7 2604 unsigned int tid = GET_TID(req);
fef4422d 2605 int ret;
cfdda9d7 2606
944661dd 2607 ep = get_ep_from_tid(dev, tid);
548ddb19 2608 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
cfdda9d7
SW
2609 ep->snd_seq = be32_to_cpu(req->snd_isn);
2610 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2611
548ddb19 2612 pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid,
a9a42886 2613 ntohs(req->tcp_opt));
1cab775c 2614
cfdda9d7
SW
2615 set_emss(ep, ntohs(req->tcp_opt));
2616
2617 dst_confirm(ep->dst);
fef4422d
H
2618 mutex_lock(&ep->com.mutex);
2619 ep->com.state = MPA_REQ_WAIT;
cfdda9d7 2620 start_ep_timer(ep);
793dad94 2621 set_bit(PASS_ESTAB, &ep->com.history);
4a740838 2622 ret = send_flowc(ep);
fef4422d
H
2623 mutex_unlock(&ep->com.mutex);
2624 if (ret)
2625 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
944661dd 2626 c4iw_put_ep(&ep->com);
cfdda9d7
SW
2627
2628 return 0;
2629}
2630
2631static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2632{
2633 struct cpl_peer_close *hdr = cplhdr(skb);
2634 struct c4iw_ep *ep;
2635 struct c4iw_qp_attributes attrs;
cfdda9d7
SW
2636 int disconnect = 1;
2637 int release = 0;
cfdda9d7 2638 unsigned int tid = GET_TID(hdr);
8da7e7a5 2639 int ret;
cfdda9d7 2640
944661dd
H
2641 ep = get_ep_from_tid(dev, tid);
2642 if (!ep)
2643 return 0;
2644
548ddb19 2645 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
cfdda9d7
SW
2646 dst_confirm(ep->dst);
2647
793dad94 2648 set_bit(PEER_CLOSE, &ep->com.history);
2f5b48c3 2649 mutex_lock(&ep->com.mutex);
cfdda9d7
SW
2650 switch (ep->com.state) {
2651 case MPA_REQ_WAIT:
2652 __state_set(&ep->com, CLOSING);
2653 break;
2654 case MPA_REQ_SENT:
2655 __state_set(&ep->com, CLOSING);
2656 connect_reply_upcall(ep, -ECONNRESET);
2657 break;
2658 case MPA_REQ_RCVD:
2659
2660 /*
2661 * We're gonna mark this puppy DEAD, but keep
2662 * the reference on it until the ULP accepts or
2663 * rejects the CR. Also wake up anyone waiting
2664 * in rdma connection migration (see c4iw_accept_cr()).
2665 */
2666 __state_set(&ep->com, CLOSING);
a9a42886 2667 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2015f26c 2668 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
cfdda9d7
SW
2669 break;
2670 case MPA_REP_SENT:
2671 __state_set(&ep->com, CLOSING);
a9a42886 2672 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2015f26c 2673 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
cfdda9d7
SW
2674 break;
2675 case FPDU_MODE:
ca5a2202 2676 start_ep_timer(ep);
cfdda9d7 2677 __state_set(&ep->com, CLOSING);
30c95c2d 2678 attrs.next_state = C4IW_QP_STATE_CLOSING;
8da7e7a5 2679 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
30c95c2d 2680 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
8da7e7a5
SW
2681 if (ret != -ECONNRESET) {
2682 peer_close_upcall(ep);
2683 disconnect = 1;
2684 }
cfdda9d7
SW
2685 break;
2686 case ABORTING:
2687 disconnect = 0;
2688 break;
2689 case CLOSING:
2690 __state_set(&ep->com, MORIBUND);
2691 disconnect = 0;
2692 break;
2693 case MORIBUND:
b33bd0cb 2694 (void)stop_ep_timer(ep);
cfdda9d7
SW
2695 if (ep->com.cm_id && ep->com.qp) {
2696 attrs.next_state = C4IW_QP_STATE_IDLE;
2697 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2698 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2699 }
be13b2df 2700 close_complete_upcall(ep, 0);
cfdda9d7
SW
2701 __state_set(&ep->com, DEAD);
2702 release = 1;
2703 disconnect = 0;
2704 break;
2705 case DEAD:
2706 disconnect = 0;
2707 break;
2708 default:
ba97b749 2709 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
cfdda9d7 2710 }
2f5b48c3 2711 mutex_unlock(&ep->com.mutex);
cfdda9d7
SW
2712 if (disconnect)
2713 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2714 if (release)
2715 release_ep_resources(ep);
944661dd 2716 c4iw_put_ep(&ep->com);
cfdda9d7
SW
2717 return 0;
2718}
2719
cfdda9d7
SW
2720static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2721{
2722 struct cpl_abort_req_rss *req = cplhdr(skb);
2723 struct c4iw_ep *ep;
cfdda9d7
SW
2724 struct sk_buff *rpl_skb;
2725 struct c4iw_qp_attributes attrs;
2726 int ret;
2727 int release = 0;
cfdda9d7 2728 unsigned int tid = GET_TID(req);
052f4731 2729 u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
cfdda9d7 2730
944661dd
H
2731 ep = get_ep_from_tid(dev, tid);
2732 if (!ep)
2733 return 0;
2734
b65eef0a 2735 if (cxgb_is_neg_adv(req->status)) {
f48fca4d
BP
2736 pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
2737 ep->hwtid, req->status, neg_adv_str(req->status));
179d03bb
H
2738 ep->stats.abort_neg_adv++;
2739 mutex_lock(&dev->rdev.stats.lock);
2740 dev->rdev.stats.neg_adv++;
2741 mutex_unlock(&dev->rdev.stats.lock);
944661dd 2742 goto deref_ep;
cfdda9d7 2743 }
548ddb19 2744 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
a9a42886 2745 ep->com.state);
793dad94 2746 set_bit(PEER_ABORT, &ep->com.history);
2f5b48c3
SW
2747
2748 /*
2749 * Wake up any threads in rdma_init() or rdma_fini().
d2fe99e8
KS
2750 * However, this is not needed if com state is just
2751 * MPA_REQ_SENT
2f5b48c3 2752 */
d2fe99e8 2753 if (ep->com.state != MPA_REQ_SENT)
2015f26c 2754 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2f5b48c3
SW
2755
2756 mutex_lock(&ep->com.mutex);
cfdda9d7
SW
2757 switch (ep->com.state) {
2758 case CONNECTING:
9dec900c 2759 c4iw_put_ep(&ep->parent_ep->com);
cfdda9d7
SW
2760 break;
2761 case MPA_REQ_WAIT:
b33bd0cb 2762 (void)stop_ep_timer(ep);
cfdda9d7
SW
2763 break;
2764 case MPA_REQ_SENT:
b33bd0cb 2765 (void)stop_ep_timer(ep);
fe7e0a4d 2766 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
d2fe99e8
KS
2767 connect_reply_upcall(ep, -ECONNRESET);
2768 else {
2769 /*
2770 * we just don't send notification upwards because we
2771 * want to retry with mpa_v1 without upper layers even
2772 * knowing it.
2773 *
2774 * do some housekeeping so as to re-initiate the
2775 * connection
2776 */
4d45b757
BP
2777 pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
2778 __func__, mpa_rev);
d2fe99e8
KS
2779 ep->retry_with_mpa_v1 = 1;
2780 }
cfdda9d7
SW
2781 break;
2782 case MPA_REP_SENT:
cfdda9d7
SW
2783 break;
2784 case MPA_REQ_RCVD:
cfdda9d7
SW
2785 break;
2786 case MORIBUND:
2787 case CLOSING:
ca5a2202 2788 stop_ep_timer(ep);
cfdda9d7
SW
2789 /*FALLTHROUGH*/
2790 case FPDU_MODE:
2791 if (ep->com.cm_id && ep->com.qp) {
2792 attrs.next_state = C4IW_QP_STATE_ERROR;
2793 ret = c4iw_modify_qp(ep->com.qp->rhp,
2794 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2795 &attrs, 1);
2796 if (ret)
700456bd 2797 pr_err("%s - qp <- error failed!\n", __func__);
cfdda9d7
SW
2798 }
2799 peer_abort_upcall(ep);
2800 break;
2801 case ABORTING:
2802 break;
2803 case DEAD:
4d45b757 2804 pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
2f5b48c3 2805 mutex_unlock(&ep->com.mutex);
944661dd 2806 goto deref_ep;
cfdda9d7 2807 default:
ba97b749 2808 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
cfdda9d7
SW
2809 break;
2810 }
2811 dst_confirm(ep->dst);
2812 if (ep->com.state != ABORTING) {
2813 __state_set(&ep->com, DEAD);
d2fe99e8
KS
2814 /* we don't release if we want to retry with mpa_v1 */
2815 if (!ep->retry_with_mpa_v1)
2816 release = 1;
cfdda9d7 2817 }
2f5b48c3 2818 mutex_unlock(&ep->com.mutex);
cfdda9d7 2819
4a740838
H
2820 rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
2821 if (WARN_ON(!rpl_skb)) {
cfdda9d7
SW
2822 release = 1;
2823 goto out;
2824 }
052f4731
VP
2825
2826 cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
2827
cfdda9d7
SW
2828 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2829out:
cfdda9d7
SW
2830 if (release)
2831 release_ep_resources(ep);
fe7e0a4d 2832 else if (ep->retry_with_mpa_v1) {
84cc6ac6
H
2833 if (ep->com.remote_addr.ss_family == AF_INET6) {
2834 struct sockaddr_in6 *sin6 =
2835 (struct sockaddr_in6 *)
170003c8 2836 &ep->com.local_addr;
84cc6ac6
H
2837 cxgb4_clip_release(
2838 ep->com.dev->rdev.lldi.ports[0],
2839 (const u32 *)&sin6->sin6_addr.s6_addr,
2840 1);
2841 }
fe7e0a4d 2842 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
1dec4cec
GG
2843 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
2844 ep->com.local_addr.ss_family);
d2fe99e8
KS
2845 dst_release(ep->dst);
2846 cxgb4_l2t_release(ep->l2t);
2847 c4iw_reconnect(ep);
2848 }
2849
944661dd
H
2850deref_ep:
2851 c4iw_put_ep(&ep->com);
2852 /* Dereferencing ep, referenced in peer_abort_intr() */
2853 c4iw_put_ep(&ep->com);
cfdda9d7
SW
2854 return 0;
2855}
2856
2857static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2858{
2859 struct c4iw_ep *ep;
2860 struct c4iw_qp_attributes attrs;
2861 struct cpl_close_con_rpl *rpl = cplhdr(skb);
cfdda9d7 2862 int release = 0;
cfdda9d7 2863 unsigned int tid = GET_TID(rpl);
cfdda9d7 2864
944661dd
H
2865 ep = get_ep_from_tid(dev, tid);
2866 if (!ep)
2867 return 0;
cfdda9d7 2868
548ddb19 2869 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
cfdda9d7
SW
2870
2871 /* The cm_id may be null if we failed to connect */
2f5b48c3 2872 mutex_lock(&ep->com.mutex);
9ca6f7cf 2873 set_bit(CLOSE_CON_RPL, &ep->com.history);
cfdda9d7
SW
2874 switch (ep->com.state) {
2875 case CLOSING:
2876 __state_set(&ep->com, MORIBUND);
2877 break;
2878 case MORIBUND:
b33bd0cb 2879 (void)stop_ep_timer(ep);
cfdda9d7
SW
2880 if ((ep->com.cm_id) && (ep->com.qp)) {
2881 attrs.next_state = C4IW_QP_STATE_IDLE;
2882 c4iw_modify_qp(ep->com.qp->rhp,
2883 ep->com.qp,
2884 C4IW_QP_ATTR_NEXT_STATE,
2885 &attrs, 1);
2886 }
be13b2df 2887 close_complete_upcall(ep, 0);
cfdda9d7
SW
2888 __state_set(&ep->com, DEAD);
2889 release = 1;
2890 break;
2891 case ABORTING:
2892 case DEAD:
2893 break;
2894 default:
ba97b749 2895 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
cfdda9d7
SW
2896 break;
2897 }
2f5b48c3 2898 mutex_unlock(&ep->com.mutex);
cfdda9d7
SW
2899 if (release)
2900 release_ep_resources(ep);
944661dd 2901 c4iw_put_ep(&ep->com);
cfdda9d7
SW
2902 return 0;
2903}
2904
2905static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
2906{
0e42c1f4 2907 struct cpl_rdma_terminate *rpl = cplhdr(skb);
0e42c1f4
SW
2908 unsigned int tid = GET_TID(rpl);
2909 struct c4iw_ep *ep;
2910 struct c4iw_qp_attributes attrs;
cfdda9d7 2911
944661dd 2912 ep = get_ep_from_tid(dev, tid);
cfdda9d7 2913
30c95c2d 2914 if (ep && ep->com.qp) {
700456bd
JP
2915 pr_warn("TERM received tid %u qpid %u\n",
2916 tid, ep->com.qp->wq.sq.qid);
0e42c1f4
SW
2917 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2918 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2919 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2920 } else
700456bd 2921 pr_warn("TERM received tid %u no ep/qp\n", tid);
944661dd 2922 c4iw_put_ep(&ep->com);
cfdda9d7 2923
cfdda9d7
SW
2924 return 0;
2925}
2926
2927/*
2928 * Upcall from the adapter indicating data has been transmitted.
2929 * For us its just the single MPA request or reply. We can now free
2930 * the skb holding the mpa message.
2931 */
2932static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2933{
2934 struct c4iw_ep *ep;
2935 struct cpl_fw4_ack *hdr = cplhdr(skb);
2936 u8 credits = hdr->credits;
2937 unsigned int tid = GET_TID(hdr);
cfdda9d7
SW
2938
2939
944661dd
H
2940 ep = get_ep_from_tid(dev, tid);
2941 if (!ep)
2942 return 0;
548ddb19
BP
2943 pr_debug("ep %p tid %u credits %u\n",
2944 ep, ep->hwtid, credits);
cfdda9d7 2945 if (credits == 0) {
548ddb19
BP
2946 pr_debug("0 credit ack ep %p tid %u state %u\n",
2947 ep, ep->hwtid, state_read(&ep->com));
944661dd 2948 goto out;
cfdda9d7
SW
2949 }
2950
2951 dst_confirm(ep->dst);
2952 if (ep->mpa_skb) {
548ddb19
BP
2953 pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
2954 ep, ep->hwtid, state_read(&ep->com),
2955 ep->mpa_attr.initiator ? 1 : 0);
12eb5137 2956 mutex_lock(&ep->com.mutex);
cfdda9d7
SW
2957 kfree_skb(ep->mpa_skb);
2958 ep->mpa_skb = NULL;
e4b76a2a
H
2959 if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
2960 stop_ep_timer(ep);
2961 mutex_unlock(&ep->com.mutex);
cfdda9d7 2962 }
944661dd
H
2963out:
2964 c4iw_put_ep(&ep->com);
cfdda9d7
SW
2965 return 0;
2966}
2967
cfdda9d7
SW
2968int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2969{
bce2841f 2970 int abort;
cfdda9d7 2971 struct c4iw_ep *ep = to_ep(cm_id);
bce2841f 2972
548ddb19 2973 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
cfdda9d7 2974
a7db89eb 2975 mutex_lock(&ep->com.mutex);
e8667a9b 2976 if (ep->com.state != MPA_REQ_RCVD) {
a7db89eb 2977 mutex_unlock(&ep->com.mutex);
cfdda9d7
SW
2978 c4iw_put_ep(&ep->com);
2979 return -ECONNRESET;
2980 }
793dad94 2981 set_bit(ULP_REJECT, &ep->com.history);
cfdda9d7 2982 if (mpa_rev == 0)
bce2841f
H
2983 abort = 1;
2984 else
2985 abort = send_mpa_reject(ep, pdata, pdata_len);
a7db89eb 2986 mutex_unlock(&ep->com.mutex);
bce2841f
H
2987
2988 stop_ep_timer(ep);
2989 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
cfdda9d7
SW
2990 c4iw_put_ep(&ep->com);
2991 return 0;
2992}
2993
2994int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2995{
2996 int err;
2997 struct c4iw_qp_attributes attrs;
2998 enum c4iw_qp_attr_mask mask;
2999 struct c4iw_ep *ep = to_ep(cm_id);
3000 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
3001 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
eaf4c6d4 3002 int abort = 0;
cfdda9d7 3003
548ddb19 3004 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
a7db89eb
SW
3005
3006 mutex_lock(&ep->com.mutex);
e8667a9b 3007 if (ep->com.state != MPA_REQ_RCVD) {
cfdda9d7 3008 err = -ECONNRESET;
eaf4c6d4 3009 goto err_out;
cfdda9d7
SW
3010 }
3011
ba97b749
SW
3012 if (!qp) {
3013 err = -EINVAL;
3014 goto err_out;
3015 }
cfdda9d7 3016
793dad94 3017 set_bit(ULP_ACCEPT, &ep->com.history);
4c2c5763
HS
3018 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
3019 (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
cfdda9d7 3020 err = -EINVAL;
eaf4c6d4 3021 goto err_abort;
cfdda9d7
SW
3022 }
3023
d2fe99e8
KS
3024 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
3025 if (conn_param->ord > ep->ird) {
4c2c5763 3026 if (RELAXED_IRD_NEGOTIATION) {
30b03b15 3027 conn_param->ord = ep->ird;
4c2c5763
HS
3028 } else {
3029 ep->ird = conn_param->ird;
3030 ep->ord = conn_param->ord;
3031 send_mpa_reject(ep, conn_param->private_data,
3032 conn_param->private_data_len);
4c2c5763 3033 err = -ENOMEM;
eaf4c6d4 3034 goto err_abort;
4c2c5763 3035 }
d2fe99e8 3036 }
4c2c5763
HS
3037 if (conn_param->ird < ep->ord) {
3038 if (RELAXED_IRD_NEGOTIATION &&
3039 ep->ord <= h->rdev.lldi.max_ordird_qp) {
3040 conn_param->ird = ep->ord;
3041 } else {
d2fe99e8 3042 err = -ENOMEM;
eaf4c6d4 3043 goto err_abort;
d2fe99e8
KS
3044 }
3045 }
d2fe99e8 3046 }
cfdda9d7
SW
3047 ep->ird = conn_param->ird;
3048 ep->ord = conn_param->ord;
3049
4c2c5763 3050 if (ep->mpa_attr.version == 1) {
d2fe99e8
KS
3051 if (peer2peer && ep->ird == 0)
3052 ep->ird = 1;
4c2c5763
HS
3053 } else {
3054 if (peer2peer &&
3055 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
f57b780c 3056 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
4c2c5763
HS
3057 ep->ird = 1;
3058 }
cfdda9d7 3059
548ddb19 3060 pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
cfdda9d7 3061
d2fe99e8 3062 ep->com.cm_id = cm_id;
9ca6f7cf 3063 ref_cm_id(&ep->com);
d2fe99e8 3064 ep->com.qp = qp;
325abead 3065 ref_qp(ep);
d2fe99e8 3066
cfdda9d7
SW
3067 /* bind QP to EP and move to RTS */
3068 attrs.mpa_attr = ep->mpa_attr;
3069 attrs.max_ird = ep->ird;
3070 attrs.max_ord = ep->ord;
3071 attrs.llp_stream_handle = ep;
3072 attrs.next_state = C4IW_QP_STATE_RTS;
3073
3074 /* bind QP and TID with INIT_WR */
3075 mask = C4IW_QP_ATTR_NEXT_STATE |
3076 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
3077 C4IW_QP_ATTR_MPA_ATTR |
3078 C4IW_QP_ATTR_MAX_IRD |
3079 C4IW_QP_ATTR_MAX_ORD;
3080
3081 err = c4iw_modify_qp(ep->com.qp->rhp,
3082 ep->com.qp, mask, &attrs, 1);
3083 if (err)
eaf4c6d4 3084 goto err_deref_cm_id;
e4b76a2a
H
3085
3086 set_bit(STOP_MPA_TIMER, &ep->com.flags);
cfdda9d7
SW
3087 err = send_mpa_reply(ep, conn_param->private_data,
3088 conn_param->private_data_len);
3089 if (err)
eaf4c6d4 3090 goto err_deref_cm_id;
cfdda9d7 3091
a7db89eb 3092 __state_set(&ep->com, FPDU_MODE);
cfdda9d7 3093 established_upcall(ep);
a7db89eb 3094 mutex_unlock(&ep->com.mutex);
cfdda9d7
SW
3095 c4iw_put_ep(&ep->com);
3096 return 0;
eaf4c6d4 3097err_deref_cm_id:
9ca6f7cf 3098 deref_cm_id(&ep->com);
eaf4c6d4
H
3099err_abort:
3100 abort = 1;
3101err_out:
a7db89eb 3102 mutex_unlock(&ep->com.mutex);
eaf4c6d4
H
3103 if (abort)
3104 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
cfdda9d7
SW
3105 c4iw_put_ep(&ep->com);
3106 return err;
3107}
3108
830662f6
VP
3109static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3110{
3111 struct in_device *ind;
3112 int found = 0;
170003c8
SW
3113 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3114 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
830662f6
VP
3115
3116 ind = in_dev_get(dev->rdev.lldi.ports[0]);
3117 if (!ind)
3118 return -EADDRNOTAVAIL;
3119 for_primary_ifa(ind) {
3120 laddr->sin_addr.s_addr = ifa->ifa_address;
3121 raddr->sin_addr.s_addr = ifa->ifa_address;
3122 found = 1;
3123 break;
3124 }
3125 endfor_ifa(ind);
3126 in_dev_put(ind);
3127 return found ? 0 : -EADDRNOTAVAIL;
3128}
3129
3130static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
3131 unsigned char banned_flags)
3132{
3133 struct inet6_dev *idev;
3134 int err = -EADDRNOTAVAIL;
3135
3136 rcu_read_lock();
3137 idev = __in6_dev_get(dev);
3138 if (idev != NULL) {
3139 struct inet6_ifaddr *ifp;
3140
3141 read_lock_bh(&idev->lock);
3142 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3143 if (ifp->scope == IFA_LINK &&
3144 !(ifp->flags & banned_flags)) {
3145 memcpy(addr, &ifp->addr, 16);
3146 err = 0;
3147 break;
3148 }
3149 }
3150 read_unlock_bh(&idev->lock);
3151 }
3152 rcu_read_unlock();
3153 return err;
3154}
3155
3156static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3157{
3158 struct in6_addr uninitialized_var(addr);
170003c8
SW
3159 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3160 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
830662f6 3161
54b9a96f 3162 if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
830662f6
VP
3163 memcpy(la6->sin6_addr.s6_addr, &addr, 16);
3164 memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
3165 return 0;
3166 }
3167 return -EADDRNOTAVAIL;
3168}
3169
cfdda9d7
SW
3170int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3171{
cfdda9d7
SW
3172 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3173 struct c4iw_ep *ep;
3786cf18 3174 int err = 0;
9eccfe10
SW
3175 struct sockaddr_in *laddr;
3176 struct sockaddr_in *raddr;
3177 struct sockaddr_in6 *laddr6;
3178 struct sockaddr_in6 *raddr6;
830662f6
VP
3179 __u8 *ra;
3180 int iptype;
cfdda9d7 3181
4c2c5763
HS
3182 if ((conn_param->ord > cur_max_read_depth(dev)) ||
3183 (conn_param->ird > cur_max_read_depth(dev))) {
be4c9bad
RD
3184 err = -EINVAL;
3185 goto out;
3186 }
cfdda9d7
SW
3187 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3188 if (!ep) {
700456bd 3189 pr_err("%s - cannot alloc ep\n", __func__);
cfdda9d7
SW
3190 err = -ENOMEM;
3191 goto out;
3192 }
4a740838
H
3193
3194 skb_queue_head_init(&ep->com.ep_skb_list);
3195 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
3196 err = -ENOMEM;
3197 goto fail1;
3198 }
3199
a9346abe 3200 timer_setup(&ep->timer, ep_timeout, 0);
cfdda9d7
SW
3201 ep->plen = conn_param->private_data_len;
3202 if (ep->plen)
3203 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
3204 conn_param->private_data, ep->plen);
3205 ep->ird = conn_param->ird;
3206 ep->ord = conn_param->ord;
3207
3208 if (peer2peer && ep->ord == 0)
3209 ep->ord = 1;
3210
cfdda9d7 3211 ep->com.cm_id = cm_id;
9ca6f7cf
H
3212 ref_cm_id(&ep->com);
3213 ep->com.dev = dev;
cfdda9d7 3214 ep->com.qp = get_qhp(dev, conn_param->qpn);
830662f6 3215 if (!ep->com.qp) {
4d45b757 3216 pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
830662f6 3217 err = -EINVAL;
4a740838 3218 goto fail2;
830662f6 3219 }
325abead 3220 ref_qp(ep);
548ddb19 3221 pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
a9a42886 3222 ep->com.qp, cm_id);
cfdda9d7
SW
3223
3224 /*
3225 * Allocate an active TID to initiate a TCP connection.
3226 */
3227 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
3228 if (ep->atid == -1) {
700456bd 3229 pr_err("%s - cannot alloc atid\n", __func__);
cfdda9d7 3230 err = -ENOMEM;
4a740838 3231 goto fail2;
cfdda9d7 3232 }
793dad94 3233 insert_handle(dev, &dev->atid_idr, ep, ep->atid);
cfdda9d7 3234
170003c8 3235 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
9eccfe10 3236 sizeof(ep->com.local_addr));
170003c8 3237 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
9eccfe10
SW
3238 sizeof(ep->com.remote_addr));
3239
170003c8
SW
3240 laddr = (struct sockaddr_in *)&ep->com.local_addr;
3241 raddr = (struct sockaddr_in *)&ep->com.remote_addr;
3242 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3243 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
9eccfe10 3244
170003c8 3245 if (cm_id->m_remote_addr.ss_family == AF_INET) {
830662f6
VP
3246 iptype = 4;
3247 ra = (__u8 *)&raddr->sin_addr;
cfdda9d7 3248
830662f6
VP
3249 /*
3250 * Handle loopback requests to INADDR_ANY.
3251 */
ba987e51 3252 if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
830662f6
VP
3253 err = pick_local_ipaddrs(dev, cm_id);
3254 if (err)
4a740838 3255 goto fail2;
830662f6
VP
3256 }
3257
3258 /* find a route */
548ddb19
BP
3259 pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
3260 &laddr->sin_addr, ntohs(laddr->sin_port),
a9a42886 3261 ra, ntohs(raddr->sin_port));
804c2f3e
VP
3262 ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3263 laddr->sin_addr.s_addr,
3264 raddr->sin_addr.s_addr,
3265 laddr->sin_port,
3266 raddr->sin_port, cm_id->tos);
830662f6
VP
3267 } else {
3268 iptype = 6;
3269 ra = (__u8 *)&raddr6->sin6_addr;
3270
3271 /*
3272 * Handle loopback requests to INADDR_ANY.
3273 */
3274 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
3275 err = pick_local_ip6addrs(dev, cm_id);
3276 if (err)
4a740838 3277 goto fail2;
830662f6
VP
3278 }
3279
3280 /* find a route */
548ddb19
BP
3281 pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
3282 laddr6->sin6_addr.s6_addr,
a9a42886
JP
3283 ntohs(laddr6->sin6_port),
3284 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
95554761
VP
3285 ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
3286 laddr6->sin6_addr.s6_addr,
3287 raddr6->sin6_addr.s6_addr,
3288 laddr6->sin6_port,
3289 raddr6->sin6_port, 0,
3290 raddr6->sin6_scope_id);
830662f6
VP
3291 }
3292 if (!ep->dst) {
700456bd 3293 pr_err("%s - cannot find route\n", __func__);
cfdda9d7 3294 err = -EHOSTUNREACH;
4a740838 3295 goto fail3;
cfdda9d7 3296 }
cfdda9d7 3297
963cab50 3298 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
ac8e4c69 3299 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
3786cf18 3300 if (err) {
700456bd 3301 pr_err("%s - cannot alloc l2e\n", __func__);
4a740838 3302 goto fail4;
cfdda9d7
SW
3303 }
3304
548ddb19
BP
3305 pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
3306 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
a9a42886 3307 ep->l2t->idx);
cfdda9d7
SW
3308
3309 state_set(&ep->com, CONNECTING);
ac8e4c69 3310 ep->tos = cm_id->tos;
cfdda9d7
SW
3311
3312 /* send connect request to rnic */
3313 err = send_connect(ep);
3314 if (!err)
3315 goto out;
3316
3317 cxgb4_l2t_release(ep->l2t);
4a740838 3318fail4:
9eccfe10 3319 dst_release(ep->dst);
4a740838 3320fail3:
793dad94 3321 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cfdda9d7 3322 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
4a740838
H
3323fail2:
3324 skb_queue_purge(&ep->com.ep_skb_list);
9ca6f7cf 3325 deref_cm_id(&ep->com);
4a740838 3326fail1:
cfdda9d7
SW
3327 c4iw_put_ep(&ep->com);
3328out:
3329 return err;
3330}
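/*
 * Usage sketch (not part of this file): ULPs reach c4iw_connect()
 * through the generic iw_cm layer rather than calling it directly.
 * The helper below is illustrative only; the event handler, the
 * ORD/IRD values and the absence of private data are assumptions
 * made for the example.
 */
static int example_active_connect(struct ib_device *ibdev,
				  struct sockaddr_in *laddr,
				  struct sockaddr_in *raddr,
				  iw_cm_handler event_handler, u32 qpn)
{
	struct iw_cm_conn_param param = {
		.ord = 8,		/* assumed values */
		.ird = 8,
		.qpn = qpn,
	};
	struct iw_cm_id *cm_id;

	cm_id = iw_create_cm_id(ibdev, event_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memcpy(&cm_id->local_addr, laddr, sizeof(*laddr));
	memcpy(&cm_id->remote_addr, raddr, sizeof(*raddr));

	/* For cxgb4 devices this lands in c4iw_connect() above. */
	return iw_cm_connect(cm_id, &param);
}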
3331
830662f6
VP
3332static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3333{
3334 int err;
9eccfe10 3335 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
170003c8 3336 &ep->com.local_addr;
830662f6 3337
28de1f74
H
3338 if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
3339 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
3340 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3341 if (err)
3342 return err;
3343 }
ef885dc6 3344 c4iw_init_wr_wait(ep->com.wr_waitp);
830662f6
VP
3345 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3346 ep->stid, &sin6->sin6_addr,
3347 sin6->sin6_port,
3348 ep->com.dev->rdev.lldi.rxq_ids[0]);
3349 if (!err)
3350 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
ef885dc6 3351 ep->com.wr_waitp,
830662f6 3352 0, 0, __func__);
e6b11163
H
3353 else if (err > 0)
3354 err = net_xmit_errno(err);
28de1f74
H
3355 if (err) {
3356 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3357 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
830662f6
VP
3358 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
3359 err, ep->stid,
3360 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
28de1f74 3361 }
830662f6
VP
3362 return err;
3363}
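/*
 * The c4iw_init_wr_wait()/c4iw_wait_for_reply() pairing above is this
 * file's standard idiom for synchronous firmware work requests: the
 * reply path (see FW6_TYPE_WR_RPL in fw6_msg() below) wakes the
 * waiter.  A minimal sketch of the same pattern, built around a
 * hypothetical request poster:
 */
static int example_sync_fw_request(struct c4iw_rdev *rdev,
				   struct c4iw_wr_wait *wr_waitp)
{
	int ret;

	c4iw_init_wr_wait(wr_waitp);
	ret = example_post_fw_request(rdev);	/* hypothetical helper */
	if (ret)
		return ret > 0 ? net_xmit_errno(ret) : ret;
	/* hwtid/qpid of 0: nothing useful to report on a timeout. */
	return c4iw_wait_for_reply(rdev, wr_waitp, 0, 0, __func__);
}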
3364
3365static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3366{
3367 int err;
9eccfe10 3368 struct sockaddr_in *sin = (struct sockaddr_in *)
170003c8 3369 &ep->com.local_addr;
830662f6
VP
3370
3371 if (dev->rdev.lldi.enable_fw_ofld_conn) {
3372 do {
3373 err = cxgb4_create_server_filter(
3374 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3375 sin->sin_addr.s_addr, sin->sin_port, 0,
3376 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3377 if (err == -EBUSY) {
99718e59
H
3378 if (c4iw_fatal_error(&ep->com.dev->rdev)) {
3379 err = -EIO;
3380 break;
3381 }
830662f6
VP
3382 set_current_state(TASK_UNINTERRUPTIBLE);
3383 schedule_timeout(usecs_to_jiffies(100));
3384 }
3385 } while (err == -EBUSY);
3386 } else {
ef885dc6 3387 c4iw_init_wr_wait(ep->com.wr_waitp);
830662f6
VP
3388 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3389 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3390 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3391 if (!err)
3392 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
ef885dc6 3393 ep->com.wr_waitp,
830662f6 3394 0, 0, __func__);
e6b11163
H
3395 else if (err > 0)
3396 err = net_xmit_errno(err);
830662f6
VP
3397 }
3398 if (err)
3399 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
3400 , err, ep->stid,
3401 &sin->sin_addr, ntohs(sin->sin_port));
3402 return err;
3403}
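/*
 * Note on the -EBUSY backoff in create_server4() above:
 * usecs_to_jiffies(100) rounds up to a full jiffy, so each retry
 * actually sleeps for at least one tick.  A sketch of a
 * finer-grained alternative (an assumption, not the driver's code)
 * would use usleep_range() from <linux/delay.h>:
 */
static void example_ebusy_backoff(void)
{
	usleep_range(100, 200);	/* sleep roughly 100-200us */
}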
3404
cfdda9d7
SW
3405int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3406{
3407 int err = 0;
3408 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3409 struct c4iw_listen_ep *ep;
3410
cfdda9d7
SW
3411 might_sleep();
3412
3413 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3414 if (!ep) {
700456bd 3415 pr_err("%s - cannot alloc ep\n", __func__);
cfdda9d7
SW
3416 err = -ENOMEM;
3417 goto fail1;
3418 }
4a740838 3419 skb_queue_head_init(&ep->com.ep_skb_list);
548ddb19 3420 pr_debug("ep %p\n", ep);
cfdda9d7 3421 ep->com.cm_id = cm_id;
9ca6f7cf 3422 ref_cm_id(&ep->com);
cfdda9d7
SW
3423 ep->com.dev = dev;
3424 ep->backlog = backlog;
170003c8 3425 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
24d44a39 3426 sizeof(ep->com.local_addr));
cfdda9d7
SW
3427
3428 /*
3429 * Allocate a server TID.
3430 */
8c044690
KS
3431 if (dev->rdev.lldi.enable_fw_ofld_conn &&
3432 ep->com.local_addr.ss_family == AF_INET)
830662f6 3433 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
170003c8 3434 cm_id->m_local_addr.ss_family, ep);
1cab775c 3435 else
830662f6 3436 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
170003c8 3437 cm_id->m_local_addr.ss_family, ep);
1cab775c 3438
cfdda9d7 3439 if (ep->stid == -1) {
700456bd 3440 pr_err("%s - cannot alloc stid\n", __func__);
cfdda9d7
SW
3441 err = -ENOMEM;
3442 goto fail2;
3443 }
793dad94 3444 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
9eccfe10 3445
170003c8
SW
3446 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3447 sizeof(ep->com.local_addr));
9eccfe10 3448
cfdda9d7 3449 state_set(&ep->com, LISTEN);
830662f6
VP
3450 if (ep->com.local_addr.ss_family == AF_INET)
3451 err = create_server4(dev, ep);
3452 else
3453 err = create_server6(dev, ep);
cfdda9d7
SW
3454 if (!err) {
3455 cm_id->provider_data = ep;
3456 goto out;
3457 }
8b1bbf36 3458 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
830662f6
VP
3459 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3460 ep->com.local_addr.ss_family);
cfdda9d7 3461fail2:
9ca6f7cf 3462 deref_cm_id(&ep->com);
cfdda9d7
SW
3463 c4iw_put_ep(&ep->com);
3464fail1:
3465out:
3466 return err;
3467}
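/*
 * Usage sketch: listening endpoints are created through the generic
 * iw_cm layer, which calls into c4iw_create_listen().  Illustrative
 * only; the handler and backlog value are assumptions.
 */
static int example_passive_listen(struct ib_device *ibdev,
				  struct sockaddr_in *laddr,
				  iw_cm_handler event_handler)
{
	struct iw_cm_id *cm_id;

	cm_id = iw_create_cm_id(ibdev, event_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memcpy(&cm_id->local_addr, laddr, sizeof(*laddr));
	return iw_cm_listen(cm_id, 16 /* assumed backlog */);
}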
3468
3469int c4iw_destroy_listen(struct iw_cm_id *cm_id)
3470{
3471 int err;
3472 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3473
548ddb19 3474 pr_debug("ep %p\n", ep);
cfdda9d7
SW
3475
3476 might_sleep();
3477 state_set(&ep->com, DEAD);
830662f6
VP
3478 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3479 ep->com.local_addr.ss_family == AF_INET) {
1cab775c
VP
3480 err = cxgb4_remove_server_filter(
3481 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3482 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3483 } else {
84cc6ac6 3484 struct sockaddr_in6 *sin6;
ef885dc6 3485 c4iw_init_wr_wait(ep->com.wr_waitp);
830662f6
VP
3486 err = cxgb4_remove_server(
3487 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3488 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
1cab775c
VP
3489 if (err)
3490 goto done;
ef885dc6 3491 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
1cab775c 3492 0, 0, __func__);
170003c8 3493 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
84cc6ac6
H
3494 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3495 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
1cab775c 3496 }
793dad94 3497 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
830662f6
VP
3498 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3499 ep->com.local_addr.ss_family);
cfdda9d7 3500done:
9ca6f7cf 3501 deref_cm_id(&ep->com);
cfdda9d7
SW
3502 c4iw_put_ep(&ep->com);
3503 return err;
3504}
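/*
 * Teardown sketch: ULPs do not call c4iw_destroy_listen() directly;
 * destroying the listening cm_id via iw_destroy_cm_id() is what
 * reaches it (assumed call path through the generic iw_cm layer).
 */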
3505
3506int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3507{
3508 int ret = 0;
cfdda9d7
SW
3509 int close = 0;
3510 int fatal = 0;
3511 struct c4iw_rdev *rdev;
cfdda9d7 3512
2f5b48c3 3513 mutex_lock(&ep->com.mutex);
cfdda9d7 3514
548ddb19 3515 pr_debug("ep %p state %s, abrupt %d\n", ep,
a9a42886 3516 states[ep->com.state], abrupt);
cfdda9d7 3517
6e410d8f
H
3518 /*
3519 * Ref the ep here in case we have fatal errors causing the
3520 * ep to be released and freed.
3521 */
3522 c4iw_get_ep(&ep->com);
3523
cfdda9d7
SW
3524 rdev = &ep->com.dev->rdev;
3525 if (c4iw_fatal_error(rdev)) {
3526 fatal = 1;
be13b2df 3527 close_complete_upcall(ep, -EIO);
cfdda9d7
SW
3528 ep->com.state = DEAD;
3529 }
3530 switch (ep->com.state) {
3531 case MPA_REQ_WAIT:
3532 case MPA_REQ_SENT:
3533 case MPA_REQ_RCVD:
3534 case MPA_REP_SENT:
3535 case FPDU_MODE:
4a740838 3536 case CONNECTING:
cfdda9d7
SW
3537 close = 1;
3538 if (abrupt)
3539 ep->com.state = ABORTING;
3540 else {
3541 ep->com.state = CLOSING;
12eb5137
SW
3542
3543 /*
3544 * If we close before we see the fw4_ack(), fix up the
3545 * timer state, since we're reusing the timer.
3546 */
3547 if (ep->mpa_skb &&
3548 test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
3549 clear_bit(STOP_MPA_TIMER, &ep->com.flags);
3550 stop_ep_timer(ep);
3551 }
ca5a2202 3552 start_ep_timer(ep);
cfdda9d7
SW
3553 }
3554 set_bit(CLOSE_SENT, &ep->com.flags);
3555 break;
3556 case CLOSING:
3557 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3558 close = 1;
3559 if (abrupt) {
b33bd0cb 3560 (void)stop_ep_timer(ep);
cfdda9d7
SW
3561 ep->com.state = ABORTING;
3562 } else
3563 ep->com.state = MORIBUND;
3564 }
3565 break;
3566 case MORIBUND:
3567 case ABORTING:
3568 case DEAD:
f48fca4d
BP
3569 pr_debug("ignoring disconnect ep %p state %u\n",
3570 ep, ep->com.state);
cfdda9d7
SW
3571 break;
3572 default:
ba97b749 3573 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
cfdda9d7
SW
3574 break;
3575 }
3576
cfdda9d7 3577 if (close) {
8da7e7a5 3578 if (abrupt) {
793dad94 3579 set_bit(EP_DISC_ABORT, &ep->com.history);
be13b2df 3580 close_complete_upcall(ep, -ECONNRESET);
4a740838 3581 ret = send_abort(ep);
793dad94
VP
3582 } else {
3583 set_bit(EP_DISC_CLOSE, &ep->com.history);
4a740838 3584 ret = send_halfclose(ep);
793dad94 3585 }
88bc230d 3586 if (ret) {
9ca6f7cf 3587 set_bit(EP_DISC_FAIL, &ep->com.history);
88bc230d
H
3588 if (!abrupt) {
3589 stop_ep_timer(ep);
3590 close_complete_upcall(ep, -EIO);
3591 }
c00dcbaf
H
3592 if (ep->com.qp) {
3593 struct c4iw_qp_attributes attrs;
3594
3595 attrs.next_state = C4IW_QP_STATE_ERROR;
3596 ret = c4iw_modify_qp(ep->com.qp->rhp,
3597 ep->com.qp,
3598 C4IW_QP_ATTR_NEXT_STATE,
3599 &attrs, 1);
3600 if (ret)
700456bd 3601 pr_err("%s - qp <- error failed!\n",
c00dcbaf
H
3602 __func__);
3603 }
cfdda9d7 3604 fatal = 1;
88bc230d 3605 }
cfdda9d7 3606 }
8da7e7a5 3607 mutex_unlock(&ep->com.mutex);
6e410d8f 3608 c4iw_put_ep(&ep->com);
cfdda9d7
SW
3609 if (fatal)
3610 release_ep_resources(ep);
3611 return ret;
3612}
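/*
 * Caller sketch: 'abrupt' selects between the two teardown flavors
 * handled above.  Illustrative only:
 *
 *	c4iw_ep_disconnect(ep, 1, GFP_KERNEL);	// abort: ABORTING path
 *	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);	// graceful: CLOSING path
 */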
3613
1cab775c
VP
3614static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3615 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3616{
3617 struct c4iw_ep *ep;
793dad94 3618 int atid = be32_to_cpu(req->tid);
1cab775c 3619
ef5d6355
VP
3620 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3621 (__force u32) req->tid);
1cab775c
VP
3622 if (!ep)
3623 return;
3624
3625 switch (req->retval) {
3626 case FW_ENOMEM:
793dad94
VP
3627 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3628 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3629 send_fw_act_open_req(ep, atid);
3630 return;
3631 }
9ae970e2 3632 /* fall through */
1cab775c 3633 case FW_EADDRINUSE:
793dad94
VP
3634 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3635 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3636 send_fw_act_open_req(ep, atid);
3637 return;
3638 }
1cab775c
VP
3639 break;
3640 default:
3641 pr_info("%s unexpected ofld conn wr retval %d\n",
3642 __func__, req->retval);
3643 break;
3644 }
793dad94
VP
3645 pr_err("active ofld_connect_wr failure %d atid %d\n",
3646 req->retval, atid);
3647 mutex_lock(&dev->rdev.stats.lock);
3648 dev->rdev.stats.act_ofld_conn_fails++;
3649 mutex_unlock(&dev->rdev.stats.lock);
1cab775c 3650 connect_reply_upcall(ep, status2errno(req->retval));
793dad94 3651 state_set(&ep->com, DEAD);
84cc6ac6
H
3652 if (ep->com.remote_addr.ss_family == AF_INET6) {
3653 struct sockaddr_in6 *sin6 =
170003c8 3654 (struct sockaddr_in6 *)&ep->com.local_addr;
84cc6ac6
H
3655 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3656 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3657 }
793dad94
VP
3658 remove_handle(dev, &dev->atid_idr, atid);
3659 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3660 dst_release(ep->dst);
3661 cxgb4_l2t_release(ep->l2t);
3662 c4iw_put_ep(&ep->com);
1cab775c
VP
3663}
3664
3665static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3666 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3667{
3668 struct sk_buff *rpl_skb;
3669 struct cpl_pass_accept_req *cpl;
3670 int ret;
3671
710a3110 3672 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
1cab775c 3673 if (req->retval) {
4d45b757 3674 pr_err("%s passive open failure %d\n", __func__, req->retval);
793dad94
VP
3675 mutex_lock(&dev->rdev.stats.lock);
3676 dev->rdev.stats.pas_ofld_conn_fails++;
3677 mutex_unlock(&dev->rdev.stats.lock);
1cab775c
VP
3678 kfree_skb(rpl_skb);
3679 } else {
3680 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3681 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
ef5d6355
VP
3682 (__force u32) htonl(
3683 (__force u32) req->tid)));
1cab775c
VP
3684 ret = pass_accept_req(dev, rpl_skb);
3685 if (!ret)
3686 kfree_skb(rpl_skb);
3687 }
3688 return;
3689}
3690
3691static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2f5b48c3
SW
3692{
3693 struct cpl_fw6_msg *rpl = cplhdr(skb);
1cab775c
VP
3694 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3695
3696 switch (rpl->type) {
3697 case FW6_TYPE_CQE:
3698 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3699 break;
3700 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3701 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3702 switch (req->t_state) {
3703 case TCP_SYN_SENT:
3704 active_ofld_conn_reply(dev, skb, req);
3705 break;
3706 case TCP_SYN_RECV:
3707 passive_ofld_conn_reply(dev, skb, req);
3708 break;
3709 default:
3710 pr_err("%s unexpected ofld conn wr state %d\n",
3711 __func__, req->t_state);
3712 break;
3713 }
3714 break;
3715 }
3716 return 0;
3717}
3718
3719static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
3720{
963cab50
H
3721 __be32 l2info;
3722 __be16 hdr_len, vlantag, len;
3723 u16 eth_hdr_len;
3724 int tcp_hdr_len, ip_hdr_len;
1cab775c
VP
3725 u8 intf;
3726 struct cpl_rx_pkt *cpl = cplhdr(skb);
3727 struct cpl_pass_accept_req *req;
3728 struct tcp_options_received tmp_opt;
f079af7a 3729 struct c4iw_dev *dev;
963cab50 3730 enum chip_type type;
1cab775c 3731
f079af7a 3732 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
1cab775c 3733 /* Store values from cpl_rx_pkt in temporary location. */
963cab50
H
3734 vlantag = cpl->vlan;
3735 len = cpl->len;
3736 l2info = cpl->l2info;
3737 hdr_len = cpl->hdr_len;
1cab775c
VP
3738 intf = cpl->iff;
3739
3740 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3741
3742 /*
3743 * We need to parse the TCP options from the SYN packet
3744 * to generate the cpl_pass_accept_req.
3745 */
3746 memset(&tmp_opt, 0, sizeof(tmp_opt));
3747 tcp_clear_options(&tmp_opt);
eed29f17 3748 tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
1cab775c 3749
d58ff351 3750 req = __skb_push(skb, sizeof(*req));
1cab775c 3751 memset(req, 0, sizeof(*req));
cf7fe64a
HS
3752 req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
3753 SYN_MAC_IDX_V(RX_MACIDX_G(
963cab50 3754 be32_to_cpu(l2info))) |
cf7fe64a 3755 SYN_XACT_MATCH_F);
963cab50
H
3756 type = dev->rdev.lldi.adapter_type;
3757 tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
3758 ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
3759 req->hdr_len =
3760 cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
3761 if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
3762 eth_hdr_len = is_t4(type) ?
3763 RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
3764 RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
3765 req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
3766 IP_HDR_LEN_V(ip_hdr_len) |
3767 ETH_HDR_LEN_V(eth_hdr_len));
3768 } else { /* T6 and later */
3769 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
3770 req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
3771 T6_IP_HDR_LEN_V(ip_hdr_len) |
3772 T6_ETH_HDR_LEN_V(eth_hdr_len));
3773 }
3774 req->vlan = vlantag;
3775 req->len = len;
6c53e938
HS
3776 req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
3777 PASS_OPEN_TOS_V(tos));
1cab775c
VP
3778 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3779 if (tmp_opt.wscale_ok)
3780 req->tcpopt.wsf = tmp_opt.snd_wscale;
3781 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3782 if (tmp_opt.sack_ok)
3783 req->tcpopt.sack = 1;
3784 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3785 return;
3786}
3787
3788static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3789 __be32 laddr, __be16 lport,
3790 __be32 raddr, __be16 rport,
3791 u32 rcv_isn, u32 filter, u16 window,
3792 u32 rss_qid, u8 port_id)
3793{
3794 struct sk_buff *req_skb;
3795 struct fw_ofld_connection_wr *req;
3796 struct cpl_pass_accept_req *cpl = cplhdr(skb);
1ce1d471 3797 int ret;
1cab775c
VP
3798
3799 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
9ef63f31
PB
3800 if (!req_skb)
3801 return;
de77b966 3802 req = __skb_put_zero(req_skb, sizeof(*req));
6c53e938 3803 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
e2ac9628 3804 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
77a80e23 3805 req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
ef5d6355 3806 req->le.filter = (__force __be32) filter;
1cab775c
VP
3807 req->le.lport = lport;
3808 req->le.pport = rport;
3809 req->le.u.ipv4.lip = laddr;
3810 req->le.u.ipv4.pip = raddr;
3811 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
3812 req->tcb.rcv_adv = htons(window);
3813 req->tcb.t_state_to_astid =
77a80e23
HS
3814 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
3815 FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
3816 FW_OFLD_CONNECTION_WR_ASTID_V(
6c53e938 3817 PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
1cab775c
VP
3818
3819 /*
3820 * We store the qid in opt2, which the firmware will use
3821 * to send us the wr response.
3822 */
d7990b0c 3823 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
1cab775c
VP
3824
3825 /*
3826 * We initialize the MSS index in the TCB to 0xF, so that
3827 * when the driver sends the cpl_pass_accept_rpl, the TCB
3828 * picks up the correct value. If this were 0, TP would
3829 * ignore any value > 0 for the MSS index.
3830 */
d7990b0c 3831 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
6198dd8d 3832 req->cookie = (uintptr_t)skb;
1cab775c
VP
3833
3834 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
1ce1d471
SW
3835 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
3836 if (ret < 0) {
3837 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
3838 ret);
3839 kfree_skb(skb);
3840 kfree_skb(req_skb);
3841 }
1cab775c
VP
3842}
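/*
 * Cookie round trip: the SYN skb pointer is stashed in req->cookie
 * above and recovered in passive_ofld_conn_reply() with the inverse
 * cast.  A sketch of the recovery side (the field width is assumed
 * to hold a kernel pointer):
 */
static struct sk_buff *example_cookie_to_skb(u64 cookie)
{
	return (struct sk_buff *)(uintptr_t)cookie;
}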
3843
3844/*
3845 * Handler for the CPL_RX_PKT message. We need to handle cpl_rx_pkt
3846 * messages when a filter, rather than a server TID, is used to
3847 * redirect a SYN packet. When a packet hits the filter it is redirected
3848 * to the offload queue, and the driver tries to establish the connection
3849 * using a firmware work request.
3850 */
3851static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3852{
3853 int stid;
3854 unsigned int filter;
3855 struct ethhdr *eh = NULL;
3856 struct vlan_ethhdr *vlan_eh = NULL;
3857 struct iphdr *iph;
3858 struct tcphdr *tcph;
3859 struct rss_header *rss = (void *)skb->data;
3860 struct cpl_rx_pkt *cpl = (void *)skb->data;
3861 struct cpl_pass_accept_req *req = (void *)(rss + 1);
3862 struct l2t_entry *e;
3863 struct dst_entry *dst;
f86fac79 3864 struct c4iw_ep *lep = NULL;
1cab775c
VP
3865 u16 window;
3866 struct port_info *pi;
3867 struct net_device *pdev;
f079af7a 3868 u16 rss_qid, eth_hdr_len;
1cab775c 3869 int step;
1cab775c
VP
3870 struct neighbour *neigh;
3871
3872 /* Drop all non-SYN packets */
bdc590b9 3873 if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
1cab775c
VP
3874 goto reject;
3875
3876 /*
3877 * Drop all packets which did not hit the filter.
3878 * Unlikely to happen.
3879 */
3880 if (!(rss->filter_hit && rss->filter_tid))
3881 goto reject;
3882
3883 /*
3884 * Calculate the server TID from the filter hit index in cpl_rx_pkt.
3885 */
a4ea025f 3886 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
1cab775c 3887
f86fac79 3888 lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
1cab775c 3889 if (!lep) {
4d45b757
BP
3890 pr_warn("%s connect request on invalid stid %d\n",
3891 __func__, stid);
1cab775c
VP
3892 goto reject;
3893 }
3894
963cab50
H
3895 switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
3896 case CHELSIO_T4:
3897 eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3898 break;
3899 case CHELSIO_T5:
3900 eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3901 break;
3902 case CHELSIO_T6:
3903 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3904 break;
3905 default:
3906 pr_err("T%d Chip is not supported\n",
3907 CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
3908 goto reject;
3909 }
3910
f079af7a 3911 if (eth_hdr_len == ETH_HLEN) {
1cab775c
VP
3912 eh = (struct ethhdr *)(req + 1);
3913 iph = (struct iphdr *)(eh + 1);
3914 } else {
3915 vlan_eh = (struct vlan_ethhdr *)(req + 1);
3916 iph = (struct iphdr *)(vlan_eh + 1);
3917 skb->vlan_tci = ntohs(cpl->vlan);
3918 }
3919
3920 if (iph->version != 0x4)
3921 goto reject;
3922
3923 tcph = (struct tcphdr *)(iph + 1);
3924 skb_set_network_header(skb, (void *)iph - (void *)rss);
3925 skb_set_transport_header(skb, (void *)tcph - (void *)rss);
3926 skb_get(skb);
3927
548ddb19 3928 pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
a9a42886
JP
3929 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
3930 ntohs(tcph->source), iph->tos);
1cab775c 3931
804c2f3e
VP
3932 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3933 iph->daddr, iph->saddr, tcph->dest,
3934 tcph->source, iph->tos);
830662f6 3935 if (!dst) {
548ddb19 3936 pr_err("%s - failed to find dst entry!\n", __func__);
1cab775c
VP
3937 goto reject;
3938 }
1cab775c
VP
3939 neigh = dst_neigh_lookup_skb(dst, skb);
3940
aaa0c23c 3941 if (!neigh) {
548ddb19 3942 pr_err("%s - failed to allocate neigh!\n", __func__);
aaa0c23c
ZZ
3943 goto free_dst;
3944 }
3945
1cab775c
VP
3946 if (neigh->dev->flags & IFF_LOOPBACK) {
3947 pdev = ip_dev_find(&init_net, iph->daddr);
3948 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3949 pdev, 0);
3950 pi = (struct port_info *)netdev_priv(pdev);
1cab775c
VP
3951 dev_put(pdev);
3952 } else {
830662f6 3953 pdev = get_real_dev(neigh->dev);
1cab775c 3954 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
830662f6
VP
3955 pdev, 0);
3956 pi = (struct port_info *)netdev_priv(pdev);
1cab775c 3957 }
ebf00060 3958 neigh_release(neigh);
1cab775c
VP
3959 if (!e) {
3960 pr_err("%s - failed to allocate l2t entry!\n",
3961 __func__);
3962 goto free_dst;
3963 }
3964
3965 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
3966 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
ef5d6355 3967 window = (__force u16) htons((__force u16)tcph->window);
1cab775c
VP
3968
3969 /* Calculate the filter portion for the LE region. */
41b4f86c
KS
3970 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3971 dev->rdev.lldi.ports[0],
3972 e));
1cab775c
VP
3973
3974 /*
3975 * Synthesize the cpl_pass_accept_req. We have everything except the
3976 * TID. Once firmware sends a reply with TID we update the TID field
3977 * in cpl and pass it through the regular cpl_pass_accept_req path.
3978 */
3979 build_cpl_pass_accept_req(skb, stid, iph->tos);
3980 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
3981 tcph->source, ntohl(tcph->seq), filter, window,
3982 rss_qid, pi->port_id);
3983 cxgb4_l2t_release(e);
3984free_dst:
3985 dst_release(dst);
3986reject:
f86fac79
H
3987 if (lep)
3988 c4iw_put_ep(&lep->com);
2f5b48c3
SW
3989 return 0;
3990}
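/*
 * End-to-end passive flow when a filter (not a server TID) catches
 * the SYN: rx_pkt() -> build_cpl_pass_accept_req() ->
 * send_fw_pass_open_req() -> firmware reply -> deferred_fw6_msg() ->
 * passive_ofld_conn_reply() -> pass_accept_req(), i.e. the synthetic
 * CPL rejoins the regular passive-open path.
 */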
3991
be4c9bad
RD
3992/*
3993 * These are the real handlers that are called from a
3994 * work queue.
3995 */
9dec900c 3996static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
be4c9bad
RD
3997 [CPL_ACT_ESTABLISH] = act_establish,
3998 [CPL_ACT_OPEN_RPL] = act_open_rpl,
3999 [CPL_RX_DATA] = rx_data,
4000 [CPL_ABORT_RPL_RSS] = abort_rpl,
4001 [CPL_ABORT_RPL] = abort_rpl,
4002 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
4003 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
4004 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
4005 [CPL_PASS_ESTABLISH] = pass_establish,
4006 [CPL_PEER_CLOSE] = peer_close,
4007 [CPL_ABORT_REQ_RSS] = peer_abort,
4008 [CPL_CLOSE_CON_RPL] = close_con_rpl,
4009 [CPL_RDMA_TERMINATE] = terminate,
2f5b48c3 4010 [CPL_FW4_ACK] = fw4_ack,
1cab775c 4011 [CPL_FW6_MSG] = deferred_fw6_msg,
9dec900c 4012 [CPL_RX_PKT] = rx_pkt,
8d1f1a6b
H
4013 [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
4014 [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
be4c9bad
RD
4015};
4016
4017static void process_timeout(struct c4iw_ep *ep)
4018{
4019 struct c4iw_qp_attributes attrs;
4020 int abort = 1;
4021
2f5b48c3 4022 mutex_lock(&ep->com.mutex);
548ddb19 4023 pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
793dad94 4024 set_bit(TIMEDOUT, &ep->com.history);
be4c9bad
RD
4025 switch (ep->com.state) {
4026 case MPA_REQ_SENT:
be4c9bad
RD
4027 connect_reply_upcall(ep, -ETIMEDOUT);
4028 break;
4029 case MPA_REQ_WAIT:
ceb110a8 4030 case MPA_REQ_RCVD:
e4b76a2a 4031 case MPA_REP_SENT:
ceb110a8 4032 case FPDU_MODE:
be4c9bad
RD
4033 break;
4034 case CLOSING:
4035 case MORIBUND:
4036 if (ep->com.cm_id && ep->com.qp) {
4037 attrs.next_state = C4IW_QP_STATE_ERROR;
4038 c4iw_modify_qp(ep->com.qp->rhp,
4039 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4040 &attrs, 1);
4041 }
be13b2df 4042 close_complete_upcall(ep, -ETIMEDOUT);
be4c9bad 4043 break;
b33bd0cb
SW
4044 case ABORTING:
4045 case DEAD:
4046
4047 /*
4048 * These states are expected if the ep timed out at the same
4049 * time as another thread was calling stop_ep_timer().
4050 * So we silently do nothing for these states.
4051 */
4052 abort = 0;
4053 break;
be4c9bad 4054 default:
76f267b7 4055 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
be4c9bad 4056 __func__, ep, ep->hwtid, ep->com.state);
be4c9bad
RD
4057 abort = 0;
4058 }
cc18b939 4059 mutex_unlock(&ep->com.mutex);
69736279
H
4060 if (abort)
4061 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
be4c9bad
RD
4062 c4iw_put_ep(&ep->com);
4063}
4064
4065static void process_timedout_eps(void)
4066{
4067 struct c4iw_ep *ep;
4068
4069 spin_lock_irq(&timeout_lock);
4070 while (!list_empty(&timeout_list)) {
4071 struct list_head *tmp;
4072
4073 tmp = timeout_list.next;
4074 list_del(tmp);
b33bd0cb
SW
4075 tmp->next = NULL;
4076 tmp->prev = NULL;
be4c9bad
RD
4077 spin_unlock_irq(&timeout_lock);
4078 ep = list_entry(tmp, struct c4iw_ep, entry);
4079 process_timeout(ep);
4080 spin_lock_irq(&timeout_lock);
4081 }
4082 spin_unlock_irq(&timeout_lock);
4083}
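/*
 * Note: the tmp->next/tmp->prev NULLing above pairs with the
 * "!ep->entry.next" test in ep_timeout() below, which is how the
 * timer avoids double-inserting an ep on the timeout list.
 */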
4084
4085static void process_work(struct work_struct *work)
4086{
4087 struct sk_buff *skb = NULL;
4088 struct c4iw_dev *dev;
c1d7356c 4089 struct cpl_act_establish *rpl;
be4c9bad
RD
4090 unsigned int opcode;
4091 int ret;
4092
b33bd0cb 4093 process_timedout_eps();
be4c9bad
RD
4094 while ((skb = skb_dequeue(&rxq))) {
4095 rpl = cplhdr(skb);
4096 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
4097 opcode = rpl->ot.opcode;
4098
ccc04cdd
DC
4099 if (opcode >= ARRAY_SIZE(work_handlers) ||
4100 !work_handlers[opcode]) {
4101 pr_err("No handler for opcode 0x%x.\n", opcode);
be4c9bad 4102 kfree_skb(skb);
ccc04cdd
DC
4103 } else {
4104 ret = work_handlers[opcode](dev, skb);
4105 if (!ret)
4106 kfree_skb(skb);
4107 }
b33bd0cb 4108 process_timedout_eps();
be4c9bad 4109 }
be4c9bad
RD
4110}
4111
4112static DECLARE_WORK(skb_work, process_work);
4113
a9346abe 4114static void ep_timeout(struct timer_list *t)
be4c9bad 4115{
a9346abe 4116 struct c4iw_ep *ep = from_timer(ep, t, timer);
1ec779cc 4117 int kickit = 0;
be4c9bad
RD
4118
4119 spin_lock(&timeout_lock);
1ec779cc 4120 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
b33bd0cb
SW
4121 /*
4122 * Only insert if it is not already on the list.
4123 */
4124 if (!ep->entry.next) {
4125 list_add_tail(&ep->entry, &timeout_list);
4126 kickit = 1;
4127 }
1ec779cc 4128 }
be4c9bad 4129 spin_unlock(&timeout_lock);
1ec779cc
VP
4130 if (kickit)
4131 queue_work(workq, &skb_work);
be4c9bad
RD
4132}
4133
cfdda9d7
SW
4134/*
4135 * All CM events are handled on a work queue so they run in a safe, sleepable context.
4136 */
4137static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
4138{
4139
4140 /*
4141 * Save dev in the skb->cb area.
4142 */
4143 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
4144
4145 /*
4146 * Queue the skb and schedule the worker thread.
4147 */
4148 skb_queue_tail(&rxq, skb);
4149 queue_work(workq, &skb_work);
4150 return 0;
4151}
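/*
 * Sketch of the skb->cb convention used by sched(): the c4iw_dev
 * pointer is stored one pointer-slot into skb->cb and read back the
 * same way in process_work() and build_cpl_pass_accept_req().
 * Hypothetical helpers that make the convention explicit:
 */
static inline void example_set_cb_dev(struct sk_buff *skb,
				      struct c4iw_dev *dev)
{
	*((struct c4iw_dev **)(skb->cb + sizeof(void *))) = dev;
}

static inline struct c4iw_dev *example_get_cb_dev(struct sk_buff *skb)
{
	return *((struct c4iw_dev **)(skb->cb + sizeof(void *)));
}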
4152
4153static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
4154{
4155 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
4156
4157 if (rpl->status != CPL_ERR_NONE) {
700456bd
JP
4158 pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
4159 rpl->status, GET_TID(rpl));
cfdda9d7 4160 }
2f5b48c3 4161 kfree_skb(skb);
cfdda9d7
SW
4162 return 0;
4163}
4164
be4c9bad
RD
4165static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
4166{
4167 struct cpl_fw6_msg *rpl = cplhdr(skb);
4168 struct c4iw_wr_wait *wr_waitp;
4169 int ret;
4170
548ddb19 4171 pr_debug("type %u\n", rpl->type);
be4c9bad
RD
4172
4173 switch (rpl->type) {
5be78ee9 4174 case FW6_TYPE_WR_RPL:
be4c9bad 4175 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
c8e081a1 4176 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
548ddb19 4177 pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
d9594d99 4178 if (wr_waitp)
2015f26c 4179 c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
2f5b48c3 4180 kfree_skb(skb);
be4c9bad 4181 break;
5be78ee9 4182 case FW6_TYPE_CQE:
5be78ee9 4183 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
1cab775c 4184 sched(dev, skb);
5be78ee9 4185 break;
be4c9bad 4186 default:
700456bd
JP
4187 pr_err("%s unexpected fw6 msg type %u\n",
4188 __func__, rpl->type);
2f5b48c3 4189 kfree_skb(skb);
be4c9bad
RD
4190 break;
4191 }
4192 return 0;
4193}
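/*
 * FW6_TYPE_WR_RPL above is the wake-up half of the wr_wait idiom:
 * rpl->data[1] carries back the c4iw_wr_wait pointer that a sleeping
 * caller (e.g. create_server6()) passed down with its work request.
 */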
4194
8da7e7a5
SW
4195static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
4196{
4197 struct cpl_abort_req_rss *req = cplhdr(skb);
4198 struct c4iw_ep *ep;
8da7e7a5
SW
4199 unsigned int tid = GET_TID(req);
4200
944661dd
H
4201 ep = get_ep_from_tid(dev, tid);
4202 /* This EP will be dereferenced in peer_abort() */
14b92228 4203 if (!ep) {
700456bd 4204 pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
14b92228
SW
4205 kfree_skb(skb);
4206 return 0;
4207 }
b65eef0a 4208 if (cxgb_is_neg_adv(req->status)) {
f48fca4d
BP
4209 pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
4210 ep->hwtid, req->status,
a9a42886 4211 neg_adv_str(req->status));
944661dd 4212 goto out;
8da7e7a5 4213 }
548ddb19 4214 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
8da7e7a5 4215
2015f26c 4216 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
944661dd 4217out:
8da7e7a5
SW
4218 sched(dev, skb);
4219 return 0;
4220}
4221
be4c9bad
RD
4222/*
4223 * Most upcalls from the T4 Core go to sched() to
4224 * schedule the processing on a work queue.
4225 */
4226c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
4227 [CPL_ACT_ESTABLISH] = sched,
4228 [CPL_ACT_OPEN_RPL] = sched,
4229 [CPL_RX_DATA] = sched,
4230 [CPL_ABORT_RPL_RSS] = sched,
4231 [CPL_ABORT_RPL] = sched,
4232 [CPL_PASS_OPEN_RPL] = sched,
4233 [CPL_CLOSE_LISTSRV_RPL] = sched,
4234 [CPL_PASS_ACCEPT_REQ] = sched,
4235 [CPL_PASS_ESTABLISH] = sched,
4236 [CPL_PEER_CLOSE] = sched,
4237 [CPL_CLOSE_CON_RPL] = sched,
8da7e7a5 4238 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
be4c9bad
RD
4239 [CPL_RDMA_TERMINATE] = sched,
4240 [CPL_FW4_ACK] = sched,
4241 [CPL_SET_TCB_RPL] = set_tcb_rpl,
1cab775c
VP
4242 [CPL_FW6_MSG] = fw6_msg,
4243 [CPL_RX_PKT] = sched
be4c9bad
RD
4244};
4245
cfdda9d7
SW
4246int __init c4iw_cm_init(void)
4247{
be4c9bad 4248 spin_lock_init(&timeout_lock);
cfdda9d7
SW
4249 skb_queue_head_init(&rxq);
4250
52ee1a05 4251 workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
cfdda9d7
SW
4252 if (!workq)
4253 return -ENOMEM;
4254
cfdda9d7
SW
4255 return 0;
4256}
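/*
 * Design note: an ordered workqueue is used so queued work items are
 * executed one at a time in queueing order, and WQ_MEM_RECLAIM
 * provides a rescuer thread so the queue can make forward progress
 * under memory pressure.
 */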
4257
46c1376d 4258void c4iw_cm_term(void)
cfdda9d7 4259{
be4c9bad 4260 WARN_ON(!list_empty(&timeout_list));
cfdda9d7
SW
4261 flush_workqueue(workq);
4262 destroy_workqueue(workq);
4263}