Commit | Line | Data |
---|---|---|
cfdda9d7 SW |
1 | /* |
2 | * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | #include <linux/module.h> | |
33 | #include <linux/list.h> | |
34 | #include <linux/workqueue.h> | |
35 | #include <linux/skbuff.h> | |
36 | #include <linux/timer.h> | |
37 | #include <linux/notifier.h> | |
38 | #include <linux/inetdevice.h> | |
39 | #include <linux/ip.h> | |
40 | #include <linux/tcp.h> | |
41 | ||
42 | #include <net/neighbour.h> | |
43 | #include <net/netevent.h> | |
44 | #include <net/route.h> | |
45 | ||
46 | #include "iw_cxgb4.h" | |
47 | ||
48 | static char *states[] = { | |
49 | "idle", | |
50 | "listen", | |
51 | "connecting", | |
52 | "mpa_wait_req", | |
53 | "mpa_req_sent", | |
54 | "mpa_req_rcvd", | |
55 | "mpa_rep_sent", | |
56 | "fpdu_mode", | |
57 | "aborting", | |
58 | "closing", | |
59 | "moribund", | |
60 | "dead", | |
61 | NULL, | |
62 | }; | |
63 | ||
5be78ee9 VP |
64 | static int nocong; |
65 | module_param(nocong, int, 0644); | |
66 | MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)"); | |
67 | ||
68 | static int enable_ecn; | |
69 | module_param(enable_ecn, int, 0644); | |
70 | MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); | |
71 | ||
b52fe09e | 72 | static int dack_mode = 1; |
ba6d3925 | 73 | module_param(dack_mode, int, 0644); |
b52fe09e | 74 | MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); |
ba6d3925 | 75 | |
be4c9bad RD |
76 | int c4iw_max_read_depth = 8; |
77 | module_param(c4iw_max_read_depth, int, 0644); | |
78 | MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); | |
79 | ||
cfdda9d7 SW |
80 | static int enable_tcp_timestamps; |
81 | module_param(enable_tcp_timestamps, int, 0644); | |
82 | MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); | |
83 | ||
84 | static int enable_tcp_sack; | |
85 | module_param(enable_tcp_sack, int, 0644); | |
86 | MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); | |
87 | ||
88 | static int enable_tcp_window_scaling = 1; | |
89 | module_param(enable_tcp_window_scaling, int, 0644); | |
90 | MODULE_PARM_DESC(enable_tcp_window_scaling, | |
91 | "Enable tcp window scaling (default=1)"); | |
92 | ||
93 | int c4iw_debug; | |
94 | module_param(c4iw_debug, int, 0644); | |
95 | MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); | |
96 | ||
97 | static int peer2peer; | |
98 | module_param(peer2peer, int, 0644); | |
99 | MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); | |
100 | ||
101 | static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; | |
102 | module_param(p2p_type, int, 0644); | |
103 | MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " | |
104 | "1=RDMA_READ 0=RDMA_WRITE (default 1)"); | |
105 | ||
106 | static int ep_timeout_secs = 60; | |
107 | module_param(ep_timeout_secs, int, 0644); | |
108 | MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " | |
109 | "in seconds (default=60)"); | |
110 | ||
111 | static int mpa_rev = 1; | |
112 | module_param(mpa_rev, int, 0644); | |
113 | MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " | |
d2fe99e8 KS |
114 | "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" |
115 | " compliant (default=1)"); | |
cfdda9d7 SW |
116 | |
117 | static int markers_enabled; | |
118 | module_param(markers_enabled, int, 0644); | |
119 | MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); | |
120 | ||
121 | static int crc_enabled = 1; | |
122 | module_param(crc_enabled, int, 0644); | |
123 | MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); | |
124 | ||
125 | static int rcv_win = 256 * 1024; | |
126 | module_param(rcv_win, int, 0644); | |
127 | MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); | |
128 | ||
98ae68b7 | 129 | static int snd_win = 128 * 1024; |
cfdda9d7 | 130 | module_param(snd_win, int, 0644); |
98ae68b7 | 131 | MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); |
cfdda9d7 | 132 | |
cfdda9d7 | 133 | static struct workqueue_struct *workq; |
cfdda9d7 SW |
134 | |
135 | static struct sk_buff_head rxq; | |
cfdda9d7 SW |
136 | |
137 | static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); | |
138 | static void ep_timeout(unsigned long arg); | |
139 | static void connect_reply_upcall(struct c4iw_ep *ep, int status); | |
140 | ||
be4c9bad RD |
141 | static LIST_HEAD(timeout_list); |
142 | static spinlock_t timeout_lock; | |
143 | ||
cfdda9d7 SW |
144 | static void start_ep_timer(struct c4iw_ep *ep) |
145 | { | |
146 | PDBG("%s ep %p\n", __func__, ep); | |
147 | if (timer_pending(&ep->timer)) { | |
148 | PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); | |
149 | del_timer_sync(&ep->timer); | |
150 | } else | |
151 | c4iw_get_ep(&ep->com); | |
152 | ep->timer.expires = jiffies + ep_timeout_secs * HZ; | |
153 | ep->timer.data = (unsigned long)ep; | |
154 | ep->timer.function = ep_timeout; | |
155 | add_timer(&ep->timer); | |
156 | } | |
157 | ||
158 | static void stop_ep_timer(struct c4iw_ep *ep) | |
159 | { | |
160 | PDBG("%s ep %p\n", __func__, ep); | |
161 | if (!timer_pending(&ep->timer)) { | |
76f267b7 | 162 | WARN(1, "%s timer stopped when it's not running! "
cfdda9d7 | 163 | "ep %p state %u\n", __func__, ep, ep->com.state); |
cfdda9d7 SW |
164 | return; |
165 | } | |
166 | del_timer_sync(&ep->timer); | |
167 | c4iw_put_ep(&ep->com); | |
168 | } | |
169 | ||
170 | static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, | |
171 | struct l2t_entry *l2e) | |
172 | { | |
173 | int error = 0; | |
174 | ||
175 | if (c4iw_fatal_error(rdev)) { | |
176 | kfree_skb(skb); | |
177 | PDBG("%s - device in error state - dropping\n", __func__); | |
178 | return -EIO; | |
179 | } | |
180 | error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); | |
181 | if (error < 0) | |
182 | kfree_skb(skb); | |
74594861 | 183 | return error < 0 ? error : 0; |
cfdda9d7 SW |
184 | } |
185 | ||
186 | int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) | |
187 | { | |
188 | int error = 0; | |
189 | ||
190 | if (c4iw_fatal_error(rdev)) { | |
191 | kfree_skb(skb); | |
192 | PDBG("%s - device in error state - dropping\n", __func__); | |
193 | return -EIO; | |
194 | } | |
195 | error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); | |
196 | if (error < 0) | |
197 | kfree_skb(skb); | |
74594861 | 198 | return error < 0 ? error : 0; |
cfdda9d7 SW |
199 | } |
200 | ||
201 | static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) | |
202 | { | |
203 | struct cpl_tid_release *req; | |
204 | ||
205 | skb = get_skb(skb, sizeof *req, GFP_KERNEL); | |
206 | if (!skb) | |
207 | return; | |
208 | req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); | |
209 | INIT_TP_WR(req, hwtid); | |
210 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); | |
211 | set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); | |
212 | c4iw_ofld_send(rdev, skb); | |
213 | return; | |
214 | } | |
215 | ||
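| /* | |
| * Derive the effective MSS for the connection: take the negotiated | |
| * MTU table entry, subtract 40 bytes for the IP/TCP headers, 12 more | |
| * if TCP timestamps are in use, and never drop below 128 bytes. | |
| */ | |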
216 | static void set_emss(struct c4iw_ep *ep, u16 opt) | |
217 | { | |
218 | ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; | |
219 | ep->mss = ep->emss; | |
220 | if (GET_TCPOPT_TSTAMP(opt)) | |
221 | ep->emss -= 12; | |
222 | if (ep->emss < 128) | |
223 | ep->emss = 128; | |
224 | PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), | |
225 | ep->mss, ep->emss); | |
226 | } | |
227 | ||
228 | static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) | |
229 | { | |
cfdda9d7 SW |
230 | enum c4iw_ep_state state; |
231 | ||
2f5b48c3 | 232 | mutex_lock(&epc->mutex); |
cfdda9d7 | 233 | state = epc->state; |
2f5b48c3 | 234 | mutex_unlock(&epc->mutex); |
cfdda9d7 SW |
235 | return state; |
236 | } | |
237 | ||
238 | static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) | |
239 | { | |
240 | epc->state = new; | |
241 | } | |
242 | ||
243 | static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) | |
244 | { | |
2f5b48c3 | 245 | mutex_lock(&epc->mutex); |
cfdda9d7 SW |
246 | PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); |
247 | __state_set(epc, new); | |
2f5b48c3 | 248 | mutex_unlock(&epc->mutex); |
cfdda9d7 SW |
249 | return; |
250 | } | |
251 | ||
252 | static void *alloc_ep(int size, gfp_t gfp) | |
253 | { | |
254 | struct c4iw_ep_common *epc; | |
255 | ||
256 | epc = kzalloc(size, gfp); | |
257 | if (epc) { | |
258 | kref_init(&epc->kref); | |
2f5b48c3 | 259 | mutex_init(&epc->mutex); |
aadc4df3 | 260 | c4iw_init_wr_wait(&epc->wr_wait); |
cfdda9d7 SW |
261 | } |
262 | PDBG("%s alloc ep %p\n", __func__, epc); | |
263 | return epc; | |
264 | } | |
265 | ||
266 | void _c4iw_free_ep(struct kref *kref) | |
267 | { | |
268 | struct c4iw_ep *ep; | |
269 | ||
270 | ep = container_of(kref, struct c4iw_ep, com.kref); | |
271 | PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); | |
272 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { | |
273 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); | |
274 | dst_release(ep->dst); | |
275 | cxgb4_l2t_release(ep->l2t); | |
276 | } | |
277 | kfree(ep); | |
278 | } | |
279 | ||
280 | static void release_ep_resources(struct c4iw_ep *ep) | |
281 | { | |
282 | set_bit(RELEASE_RESOURCES, &ep->com.flags); | |
283 | c4iw_put_ep(&ep->com); | |
284 | } | |
285 | ||
cfdda9d7 SW |
286 | static int status2errno(int status) |
287 | { | |
288 | switch (status) { | |
289 | case CPL_ERR_NONE: | |
290 | return 0; | |
291 | case CPL_ERR_CONN_RESET: | |
292 | return -ECONNRESET; | |
293 | case CPL_ERR_ARP_MISS: | |
294 | return -EHOSTUNREACH; | |
295 | case CPL_ERR_CONN_TIMEDOUT: | |
296 | return -ETIMEDOUT; | |
297 | case CPL_ERR_TCAM_FULL: | |
298 | return -ENOMEM; | |
299 | case CPL_ERR_CONN_EXIST: | |
300 | return -EADDRINUSE; | |
301 | default: | |
302 | return -EIO; | |
303 | } | |
304 | } | |
305 | ||
306 | /* | |
307 | * Try to reuse skbs already allocated... | |
308 | */ | |
309 | static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) | |
310 | { | |
311 | if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { | |
312 | skb_trim(skb, 0); | |
313 | skb_get(skb); | |
314 | skb_reset_transport_header(skb); | |
315 | } else { | |
316 | skb = alloc_skb(len, gfp); | |
317 | } | |
318 | return skb; | |
319 | } | |
320 | ||
321 | static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, | |
322 | __be32 peer_ip, __be16 local_port, | |
323 | __be16 peer_port, u8 tos) | |
324 | { | |
325 | struct rtable *rt; | |
31e4543d | 326 | struct flowi4 fl4; |
78fbfd8a | 327 | |
31e4543d | 328 | rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, |
78fbfd8a DM |
329 | peer_port, local_port, IPPROTO_TCP, |
330 | tos, 0); | |
b23dd4fe | 331 | if (IS_ERR(rt)) |
cfdda9d7 SW |
332 | return NULL; |
333 | return rt; | |
334 | } | |
335 | ||
336 | static void arp_failure_discard(void *handle, struct sk_buff *skb) | |
337 | { | |
338 | PDBG("%s c4iw_dev %p\n", __func__, handle); | |
339 | kfree_skb(skb); | |
340 | } | |
341 | ||
342 | /* | |
343 | * Handle an ARP failure for an active open. | |
344 | */ | |
345 | static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) | |
346 | { | |
347 | printk(KERN_ERR MOD "ARP failure duing connect\n"); | |
348 | kfree_skb(skb); | |
349 | } | |
350 | ||
351 | /* | |
352 | * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant | |
353 | * and send it along. | |
354 | */ | |
355 | static void abort_arp_failure(void *handle, struct sk_buff *skb) | |
356 | { | |
357 | struct c4iw_rdev *rdev = handle; | |
358 | struct cpl_abort_req *req = cplhdr(skb); | |
359 | ||
360 | PDBG("%s rdev %p\n", __func__, rdev); | |
361 | req->cmd = CPL_ABORT_NO_RST; | |
362 | c4iw_ofld_send(rdev, skb); | |
363 | } | |
364 | ||
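| /* | |
| * Send a FW_FLOWC_WR to the firmware to establish the flow context | |
| * for this offloaded connection: PCI function, channel, port, ingress | |
| * queue, initial send/receive sequence numbers, send buffer and MSS. | |
| */ | |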
365 | static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) | |
366 | { | |
367 | unsigned int flowclen = 80; | |
368 | struct fw_flowc_wr *flowc; | |
369 | int i; | |
370 | ||
371 | skb = get_skb(skb, flowclen, GFP_KERNEL); | |
372 | flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); | |
373 | ||
374 | flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | | |
375 | FW_FLOWC_WR_NPARAMS(8)); | |
376 | flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, | |
377 | 16)) | FW_WR_FLOWID(ep->hwtid)); | |
378 | ||
379 | flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; | |
94788657 | 380 | flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); |
cfdda9d7 SW |
381 | flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; |
382 | flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); | |
383 | flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; | |
384 | flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); | |
385 | flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; | |
386 | flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); | |
387 | flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; | |
388 | flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); | |
389 | flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; | |
390 | flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); | |
391 | flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; | |
392 | flowc->mnemval[6].val = cpu_to_be32(snd_win); | |
393 | flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; | |
394 | flowc->mnemval[7].val = cpu_to_be32(ep->emss); | |
395 | /* Pad WR to 16 byte boundary */ | |
396 | flowc->mnemval[8].mnemonic = 0; | |
397 | flowc->mnemval[8].val = 0; | |
398 | for (i = 0; i < 9; i++) { | |
399 | flowc->mnemval[i].r4[0] = 0; | |
400 | flowc->mnemval[i].r4[1] = 0; | |
401 | flowc->mnemval[i].r4[2] = 0; | |
402 | } | |
403 | ||
404 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
405 | c4iw_ofld_send(&ep->com.dev->rdev, skb); | |
406 | } | |
407 | ||
408 | static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) | |
409 | { | |
410 | struct cpl_close_con_req *req; | |
411 | struct sk_buff *skb; | |
412 | int wrlen = roundup(sizeof *req, 16); | |
413 | ||
414 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
415 | skb = get_skb(NULL, wrlen, gfp); | |
416 | if (!skb) { | |
417 | printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); | |
418 | return -ENOMEM; | |
419 | } | |
420 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
421 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
422 | req = (struct cpl_close_con_req *) skb_put(skb, wrlen); | |
423 | memset(req, 0, wrlen); | |
424 | INIT_TP_WR(req, ep->hwtid); | |
425 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, | |
426 | ep->hwtid)); | |
427 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
428 | } | |
429 | ||
430 | static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |
431 | { | |
432 | struct cpl_abort_req *req; | |
433 | int wrlen = roundup(sizeof *req, 16); | |
434 | ||
435 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
436 | skb = get_skb(skb, wrlen, gfp); | |
437 | if (!skb) { | |
438 | printk(KERN_ERR MOD "%s - failed to alloc skb.\n", | |
439 | __func__); | |
440 | return -ENOMEM; | |
441 | } | |
442 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
443 | t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); | |
444 | req = (struct cpl_abort_req *) skb_put(skb, wrlen); | |
445 | memset(req, 0, wrlen); | |
446 | INIT_TP_WR(req, ep->hwtid); | |
447 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); | |
448 | req->cmd = CPL_ABORT_SEND_RST; | |
449 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
450 | } | |
451 | ||
5be78ee9 VP |
452 | #define VLAN_NONE 0xfff |
453 | #define FILTER_SEL_VLAN_NONE 0xffff | |
454 | #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ | |
455 | #define FILTER_SEL_WIDTH_VIN_P_FC \ | |
456 | (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ | |
457 | #define FILTER_SEL_WIDTH_TAG_P_FC \ | |
458 | (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ | |
459 | #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) | |
460 | ||
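| /* | |
| * Build the hardware filter tuple (VLAN tag, port, protocol and, for | |
| * the outer-VLAN filter mode, the virtual interface id) used to | |
| * identify this connection, based on the adapter's filter mode. | |
| */ | |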
461 | static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, | |
462 | struct l2t_entry *l2t) | |
463 | { | |
464 | unsigned int ntuple = 0; | |
465 | u32 viid; | |
466 | ||
467 | switch (dev->rdev.lldi.filt_mode) { | |
468 | ||
469 | /* default filter mode */ | |
470 | case HW_TPL_FR_MT_PR_IV_P_FC: | |
471 | if (l2t->vlan == VLAN_NONE) | |
472 | ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; | |
473 | else { | |
474 | ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; | |
475 | ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC; | |
476 | } | |
477 | ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << | |
478 | FILTER_SEL_WIDTH_VLD_TAG_P_FC; | |
479 | break; | |
480 | case HW_TPL_FR_MT_PR_OV_P_FC: { | |
481 | viid = cxgb4_port_viid(l2t->neigh->dev); | |
482 | ||
483 | ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; | |
484 | ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; | |
485 | ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; | |
486 | ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << | |
487 | FILTER_SEL_WIDTH_VLD_TAG_P_FC; | |
488 | break; | |
489 | } | |
490 | default: | |
491 | break; | |
492 | } | |
493 | return ntuple; | |
494 | } | |
495 | ||
cfdda9d7 SW |
496 | static int send_connect(struct c4iw_ep *ep) |
497 | { | |
498 | struct cpl_act_open_req *req; | |
499 | struct sk_buff *skb; | |
500 | u64 opt0; | |
501 | u32 opt2; | |
502 | unsigned int mtu_idx; | |
503 | int wscale; | |
504 | int wrlen = roundup(sizeof *req, 16); | |
505 | ||
506 | PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); | |
507 | ||
508 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
509 | if (!skb) { | |
510 | printk(KERN_ERR MOD "%s - failed to alloc skb.\n", | |
511 | __func__); | |
512 | return -ENOMEM; | |
513 | } | |
d4f1a5c6 | 514 | set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); |
cfdda9d7 SW |
515 | |
516 | cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); | |
517 | wscale = compute_wscale(rcv_win); | |
5be78ee9 VP |
518 | opt0 = (nocong ? NO_CONG(1) : 0) | |
519 | KEEP_ALIVE(1) | | |
ba6d3925 | 520 | DELACK(1) | |
cfdda9d7 SW |
521 | WND_SCALE(wscale) | |
522 | MSS_IDX(mtu_idx) | | |
523 | L2T_IDX(ep->l2t->idx) | | |
524 | TX_CHAN(ep->tx_chan) | | |
525 | SMAC_SEL(ep->smac_idx) | | |
526 | DSCP(ep->tos) | | |
b48f3b9c | 527 | ULP_MODE(ULP_MODE_TCPDDP) | |
cfdda9d7 SW |
528 | RCV_BUFSIZ(rcv_win>>10); |
529 | opt2 = RX_CHANNEL(0) | | |
5be78ee9 | 530 | CCTRL_ECN(enable_ecn) | |
cfdda9d7 SW |
531 | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); |
532 | if (enable_tcp_timestamps) | |
533 | opt2 |= TSTAMPS_EN(1); | |
534 | if (enable_tcp_sack) | |
535 | opt2 |= SACK_EN(1); | |
536 | if (wscale && enable_tcp_window_scaling) | |
537 | opt2 |= WND_SCALE_EN(1); | |
538 | t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); | |
539 | ||
540 | req = (struct cpl_act_open_req *) skb_put(skb, wrlen); | |
541 | INIT_TP_WR(req, 0); | |
542 | OPCODE_TID(req) = cpu_to_be32( | |
543 | MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); | |
544 | req->local_port = ep->com.local_addr.sin_port; | |
545 | req->peer_port = ep->com.remote_addr.sin_port; | |
546 | req->local_ip = ep->com.local_addr.sin_addr.s_addr; | |
547 | req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; | |
548 | req->opt0 = cpu_to_be64(opt0); | |
5be78ee9 | 549 | req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t)); |
cfdda9d7 SW |
550 | req->opt2 = cpu_to_be32(opt2); |
551 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
552 | } | |
553 | ||
d2fe99e8 KS |
554 | static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, |
555 | u8 mpa_rev_to_use) | |
cfdda9d7 SW |
556 | { |
557 | int mpalen, wrlen; | |
558 | struct fw_ofld_tx_data_wr *req; | |
559 | struct mpa_message *mpa; | |
d2fe99e8 | 560 | struct mpa_v2_conn_params mpa_v2_params; |
cfdda9d7 SW |
561 | |
562 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
563 | ||
564 | BUG_ON(skb_cloned(skb)); | |
565 | ||
566 | mpalen = sizeof(*mpa) + ep->plen; | |
d2fe99e8 KS |
567 | if (mpa_rev_to_use == 2) |
568 | mpalen += sizeof(struct mpa_v2_conn_params); | |
cfdda9d7 SW |
569 | wrlen = roundup(mpalen + sizeof *req, 16); |
570 | skb = get_skb(skb, wrlen, GFP_KERNEL); | |
571 | if (!skb) { | |
572 | connect_reply_upcall(ep, -ENOMEM); | |
573 | return; | |
574 | } | |
575 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
576 | ||
577 | req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); | |
578 | memset(req, 0, wrlen); | |
579 | req->op_to_immdlen = cpu_to_be32( | |
580 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
581 | FW_WR_COMPL(1) | | |
582 | FW_WR_IMMDLEN(mpalen)); | |
583 | req->flowid_len16 = cpu_to_be32( | |
584 | FW_WR_FLOWID(ep->hwtid) | | |
585 | FW_WR_LEN16(wrlen >> 4)); | |
586 | req->plen = cpu_to_be32(mpalen); | |
587 | req->tunnel_to_proxy = cpu_to_be32( | |
588 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
589 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
590 | ||
591 | mpa = (struct mpa_message *)(req + 1); | |
592 | memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); | |
593 | mpa->flags = (crc_enabled ? MPA_CRC : 0) | | |
d2fe99e8 KS |
594 | (markers_enabled ? MPA_MARKERS : 0) | |
595 | (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); | |
cfdda9d7 | 596 | mpa->private_data_size = htons(ep->plen); |
d2fe99e8 | 597 | mpa->revision = mpa_rev_to_use; |
01b225e1 | 598 | if (mpa_rev_to_use == 1) { |
d2fe99e8 | 599 | ep->tried_with_mpa_v1 = 1; |
01b225e1 KS |
600 | ep->retry_with_mpa_v1 = 0; |
601 | } | |
d2fe99e8 KS |
602 | |
603 | if (mpa_rev_to_use == 2) { | |
f747c34a RD |
604 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
605 | sizeof (struct mpa_v2_conn_params)); | |
d2fe99e8 KS |
606 | mpa_v2_params.ird = htons((u16)ep->ird); |
607 | mpa_v2_params.ord = htons((u16)ep->ord); | |
608 | ||
609 | if (peer2peer) { | |
610 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | |
611 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | |
612 | mpa_v2_params.ord |= | |
613 | htons(MPA_V2_RDMA_WRITE_RTR); | |
614 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | |
615 | mpa_v2_params.ord |= | |
616 | htons(MPA_V2_RDMA_READ_RTR); | |
617 | } | |
618 | memcpy(mpa->private_data, &mpa_v2_params, | |
619 | sizeof(struct mpa_v2_conn_params)); | |
cfdda9d7 | 620 | |
d2fe99e8 KS |
621 | if (ep->plen) |
622 | memcpy(mpa->private_data + | |
623 | sizeof(struct mpa_v2_conn_params), | |
624 | ep->mpa_pkt + sizeof(*mpa), ep->plen); | |
625 | } else | |
626 | if (ep->plen) | |
627 | memcpy(mpa->private_data, | |
628 | ep->mpa_pkt + sizeof(*mpa), ep->plen); | |
cfdda9d7 SW |
629 | |
630 | /* | |
631 | * Reference the mpa skb. This ensures the data area | |
632 | * will remain in memory until the hw acks the tx. | |
633 | * Function fw4_ack() will deref it. | |
634 | */ | |
635 | skb_get(skb); | |
636 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
637 | BUG_ON(ep->mpa_skb); | |
638 | ep->mpa_skb = skb; | |
639 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
640 | start_ep_timer(ep); | |
641 | state_set(&ep->com, MPA_REQ_SENT); | |
642 | ep->mpa_attr.initiator = 1; | |
643 | return; | |
644 | } | |
645 | ||
646 | static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |
647 | { | |
648 | int mpalen, wrlen; | |
649 | struct fw_ofld_tx_data_wr *req; | |
650 | struct mpa_message *mpa; | |
651 | struct sk_buff *skb; | |
d2fe99e8 | 652 | struct mpa_v2_conn_params mpa_v2_params; |
cfdda9d7 SW |
653 | |
654 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
655 | ||
656 | mpalen = sizeof(*mpa) + plen; | |
d2fe99e8 KS |
657 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) |
658 | mpalen += sizeof(struct mpa_v2_conn_params); | |
cfdda9d7 SW |
659 | wrlen = roundup(mpalen + sizeof *req, 16); |
660 | ||
661 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
662 | if (!skb) { | |
663 | printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); | |
664 | return -ENOMEM; | |
665 | } | |
666 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
667 | ||
668 | req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); | |
669 | memset(req, 0, wrlen); | |
670 | req->op_to_immdlen = cpu_to_be32( | |
671 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
672 | FW_WR_COMPL(1) | | |
673 | FW_WR_IMMDLEN(mpalen)); | |
674 | req->flowid_len16 = cpu_to_be32( | |
675 | FW_WR_FLOWID(ep->hwtid) | | |
676 | FW_WR_LEN16(wrlen >> 4)); | |
677 | req->plen = cpu_to_be32(mpalen); | |
678 | req->tunnel_to_proxy = cpu_to_be32( | |
679 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
680 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
681 | ||
682 | mpa = (struct mpa_message *)(req + 1); | |
683 | memset(mpa, 0, sizeof(*mpa)); | |
684 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); | |
685 | mpa->flags = MPA_REJECT; | |
686 | mpa->revision = mpa_rev; | |
687 | mpa->private_data_size = htons(plen); | |
d2fe99e8 KS |
688 | |
689 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { | |
690 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | |
f747c34a RD |
691 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
692 | sizeof (struct mpa_v2_conn_params)); | |
d2fe99e8 KS |
693 | mpa_v2_params.ird = htons(((u16)ep->ird) | |
694 | (peer2peer ? MPA_V2_PEER2PEER_MODEL : | |
695 | 0)); | |
696 | mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? | |
697 | (p2p_type == | |
698 | FW_RI_INIT_P2PTYPE_RDMA_WRITE ? | |
699 | MPA_V2_RDMA_WRITE_RTR : p2p_type == | |
700 | FW_RI_INIT_P2PTYPE_READ_REQ ? | |
701 | MPA_V2_RDMA_READ_RTR : 0) : 0)); | |
702 | memcpy(mpa->private_data, &mpa_v2_params, | |
703 | sizeof(struct mpa_v2_conn_params)); | |
704 | ||
705 | if (ep->plen) | |
706 | memcpy(mpa->private_data + | |
707 | sizeof(struct mpa_v2_conn_params), pdata, plen); | |
708 | } else | |
709 | if (plen) | |
710 | memcpy(mpa->private_data, pdata, plen); | |
cfdda9d7 SW |
711 | |
712 | /* | |
713 | * Reference the mpa skb again. This ensures the data area | |
714 | * will remain in memory until the hw acks the tx. | |
715 | * Function fw4_ack() will deref it. | |
716 | */ | |
717 | skb_get(skb); | |
718 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
719 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
720 | BUG_ON(ep->mpa_skb); | |
721 | ep->mpa_skb = skb; | |
722 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
723 | } | |
724 | ||
725 | static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |
726 | { | |
727 | int mpalen, wrlen; | |
728 | struct fw_ofld_tx_data_wr *req; | |
729 | struct mpa_message *mpa; | |
730 | struct sk_buff *skb; | |
d2fe99e8 | 731 | struct mpa_v2_conn_params mpa_v2_params; |
cfdda9d7 SW |
732 | |
733 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
734 | ||
735 | mpalen = sizeof(*mpa) + plen; | |
d2fe99e8 KS |
736 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) |
737 | mpalen += sizeof(struct mpa_v2_conn_params); | |
cfdda9d7 SW |
738 | wrlen = roundup(mpalen + sizeof *req, 16); |
739 | ||
740 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
741 | if (!skb) { | |
742 | printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); | |
743 | return -ENOMEM; | |
744 | } | |
745 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
746 | ||
747 | req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); | |
748 | memset(req, 0, wrlen); | |
749 | req->op_to_immdlen = cpu_to_be32( | |
750 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
751 | FW_WR_COMPL(1) | | |
752 | FW_WR_IMMDLEN(mpalen)); | |
753 | req->flowid_len16 = cpu_to_be32( | |
754 | FW_WR_FLOWID(ep->hwtid) | | |
755 | FW_WR_LEN16(wrlen >> 4)); | |
756 | req->plen = cpu_to_be32(mpalen); | |
757 | req->tunnel_to_proxy = cpu_to_be32( | |
758 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
759 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
760 | ||
761 | mpa = (struct mpa_message *)(req + 1); | |
762 | memset(mpa, 0, sizeof(*mpa)); | |
763 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); | |
764 | mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | | |
765 | (markers_enabled ? MPA_MARKERS : 0); | |
d2fe99e8 | 766 | mpa->revision = ep->mpa_attr.version; |
cfdda9d7 | 767 | mpa->private_data_size = htons(plen); |
d2fe99e8 KS |
768 | |
769 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { | |
770 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | |
f747c34a RD |
771 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
772 | sizeof (struct mpa_v2_conn_params)); | |
d2fe99e8 KS |
773 | mpa_v2_params.ird = htons((u16)ep->ird); |
774 | mpa_v2_params.ord = htons((u16)ep->ord); | |
775 | if (peer2peer && (ep->mpa_attr.p2p_type != | |
776 | FW_RI_INIT_P2PTYPE_DISABLED)) { | |
777 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | |
778 | ||
779 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | |
780 | mpa_v2_params.ord |= | |
781 | htons(MPA_V2_RDMA_WRITE_RTR); | |
782 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | |
783 | mpa_v2_params.ord |= | |
784 | htons(MPA_V2_RDMA_READ_RTR); | |
785 | } | |
786 | ||
787 | memcpy(mpa->private_data, &mpa_v2_params, | |
788 | sizeof(struct mpa_v2_conn_params)); | |
789 | ||
790 | if (ep->plen) | |
791 | memcpy(mpa->private_data + | |
792 | sizeof(struct mpa_v2_conn_params), pdata, plen); | |
793 | } else | |
794 | if (plen) | |
795 | memcpy(mpa->private_data, pdata, plen); | |
cfdda9d7 SW |
796 | |
797 | /* | |
798 | * Reference the mpa skb. This ensures the data area | |
799 | * will remain in memory until the hw acks the tx. | |
800 | * Function fw4_ack() will deref it. | |
801 | */ | |
802 | skb_get(skb); | |
803 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
804 | ep->mpa_skb = skb; | |
805 | state_set(&ep->com, MPA_REP_SENT); | |
806 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
807 | } | |
808 | ||
809 | static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |
810 | { | |
811 | struct c4iw_ep *ep; | |
812 | struct cpl_act_establish *req = cplhdr(skb); | |
813 | unsigned int tid = GET_TID(req); | |
814 | unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); | |
815 | struct tid_info *t = dev->rdev.lldi.tids; | |
816 | ||
817 | ep = lookup_atid(t, atid); | |
818 | ||
819 | PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, | |
820 | be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); | |
821 | ||
822 | dst_confirm(ep->dst); | |
823 | ||
824 | /* setup the hwtid for this connection */ | |
825 | ep->hwtid = tid; | |
826 | cxgb4_insert_tid(t, ep, tid); | |
827 | ||
828 | ep->snd_seq = be32_to_cpu(req->snd_isn); | |
829 | ep->rcv_seq = be32_to_cpu(req->rcv_isn); | |
830 | ||
831 | set_emss(ep, ntohs(req->tcp_opt)); | |
832 | ||
833 | /* dealloc the atid */ | |
834 | cxgb4_free_atid(t, atid); | |
835 | ||
836 | /* start MPA negotiation */ | |
837 | send_flowc(ep, NULL); | |
d2fe99e8 KS |
838 | if (ep->retry_with_mpa_v1) |
839 | send_mpa_req(ep, skb, 1); | |
840 | else | |
841 | send_mpa_req(ep, skb, mpa_rev); | |
cfdda9d7 SW |
842 | |
843 | return 0; | |
844 | } | |
845 | ||
846 | static void close_complete_upcall(struct c4iw_ep *ep) | |
847 | { | |
848 | struct iw_cm_event event; | |
849 | ||
850 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
851 | memset(&event, 0, sizeof(event)); | |
852 | event.event = IW_CM_EVENT_CLOSE; | |
853 | if (ep->com.cm_id) { | |
854 | PDBG("close complete delivered ep %p cm_id %p tid %u\n", | |
855 | ep, ep->com.cm_id, ep->hwtid); | |
856 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
857 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
858 | ep->com.cm_id = NULL; | |
859 | ep->com.qp = NULL; | |
860 | } | |
861 | } | |
862 | ||
863 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |
864 | { | |
865 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
866 | close_complete_upcall(ep); | |
867 | state_set(&ep->com, ABORTING); | |
868 | return send_abort(ep, skb, gfp); | |
869 | } | |
870 | ||
871 | static void peer_close_upcall(struct c4iw_ep *ep) | |
872 | { | |
873 | struct iw_cm_event event; | |
874 | ||
875 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
876 | memset(&event, 0, sizeof(event)); | |
877 | event.event = IW_CM_EVENT_DISCONNECT; | |
878 | if (ep->com.cm_id) { | |
879 | PDBG("peer close delivered ep %p cm_id %p tid %u\n", | |
880 | ep, ep->com.cm_id, ep->hwtid); | |
881 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
882 | } | |
883 | } | |
884 | ||
885 | static void peer_abort_upcall(struct c4iw_ep *ep) | |
886 | { | |
887 | struct iw_cm_event event; | |
888 | ||
889 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
890 | memset(&event, 0, sizeof(event)); | |
891 | event.event = IW_CM_EVENT_CLOSE; | |
892 | event.status = -ECONNRESET; | |
893 | if (ep->com.cm_id) { | |
894 | PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, | |
895 | ep->com.cm_id, ep->hwtid); | |
896 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
897 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
898 | ep->com.cm_id = NULL; | |
899 | ep->com.qp = NULL; | |
900 | } | |
901 | } | |
902 | ||
903 | static void connect_reply_upcall(struct c4iw_ep *ep, int status) | |
904 | { | |
905 | struct iw_cm_event event; | |
906 | ||
907 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); | |
908 | memset(&event, 0, sizeof(event)); | |
909 | event.event = IW_CM_EVENT_CONNECT_REPLY; | |
910 | event.status = status; | |
911 | event.local_addr = ep->com.local_addr; | |
912 | event.remote_addr = ep->com.remote_addr; | |
913 | ||
914 | if ((status == 0) || (status == -ECONNREFUSED)) { | |
d2fe99e8 KS |
915 | if (!ep->tried_with_mpa_v1) { |
916 | /* this means MPA_v2 is used */ | |
917 | event.private_data_len = ep->plen - | |
918 | sizeof(struct mpa_v2_conn_params); | |
919 | event.private_data = ep->mpa_pkt + | |
920 | sizeof(struct mpa_message) + | |
921 | sizeof(struct mpa_v2_conn_params); | |
922 | } else { | |
923 | /* this means MPA_v1 is used */ | |
924 | event.private_data_len = ep->plen; | |
925 | event.private_data = ep->mpa_pkt + | |
926 | sizeof(struct mpa_message); | |
927 | } | |
cfdda9d7 | 928 | } |
85963e4c RD |
929 | |
930 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, | |
931 | ep->hwtid, status); | |
932 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
933 | ||
cfdda9d7 SW |
934 | if (status < 0) { |
935 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
936 | ep->com.cm_id = NULL; | |
937 | ep->com.qp = NULL; | |
938 | } | |
939 | } | |
940 | ||
941 | static void connect_request_upcall(struct c4iw_ep *ep) | |
942 | { | |
943 | struct iw_cm_event event; | |
944 | ||
945 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
946 | memset(&event, 0, sizeof(event)); | |
947 | event.event = IW_CM_EVENT_CONNECT_REQUEST; | |
948 | event.local_addr = ep->com.local_addr; | |
949 | event.remote_addr = ep->com.remote_addr; | |
cfdda9d7 | 950 | event.provider_data = ep; |
d2fe99e8 KS |
951 | if (!ep->tried_with_mpa_v1) { |
952 | /* this means MPA_v2 is used */ | |
953 | event.ord = ep->ord; | |
954 | event.ird = ep->ird; | |
955 | event.private_data_len = ep->plen - | |
956 | sizeof(struct mpa_v2_conn_params); | |
957 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + | |
958 | sizeof(struct mpa_v2_conn_params); | |
959 | } else { | |
960 | /* this means MPA_v1 is used. Send max supported */ | |
961 | event.ord = c4iw_max_read_depth; | |
962 | event.ird = c4iw_max_read_depth; | |
963 | event.private_data_len = ep->plen; | |
964 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | |
965 | } | |
cfdda9d7 SW |
966 | if (state_read(&ep->parent_ep->com) != DEAD) { |
967 | c4iw_get_ep(&ep->com); | |
968 | ep->parent_ep->com.cm_id->event_handler( | |
969 | ep->parent_ep->com.cm_id, | |
970 | &event); | |
971 | } | |
972 | c4iw_put_ep(&ep->parent_ep->com); | |
973 | ep->parent_ep = NULL; | |
974 | } | |
975 | ||
976 | static void established_upcall(struct c4iw_ep *ep) | |
977 | { | |
978 | struct iw_cm_event event; | |
979 | ||
980 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
981 | memset(&event, 0, sizeof(event)); | |
982 | event.event = IW_CM_EVENT_ESTABLISHED; | |
d2fe99e8 KS |
983 | event.ird = ep->ird; |
984 | event.ord = ep->ord; | |
cfdda9d7 SW |
985 | if (ep->com.cm_id) { |
986 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
987 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
988 | } | |
989 | } | |
990 | ||
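| /* | |
| * Return RX credits to the hardware via CPL_RX_DATA_ACK so the TCP | |
| * receive window reopens, and select the configured delayed-ack mode. | |
| */ | |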
991 | static int update_rx_credits(struct c4iw_ep *ep, u32 credits) | |
992 | { | |
993 | struct cpl_rx_data_ack *req; | |
994 | struct sk_buff *skb; | |
995 | int wrlen = roundup(sizeof *req, 16); | |
996 | ||
997 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); | |
998 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
999 | if (!skb) { | |
1000 | printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); | |
1001 | return 0; | |
1002 | } | |
1003 | ||
1004 | req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); | |
1005 | memset(req, 0, wrlen); | |
1006 | INIT_TP_WR(req, ep->hwtid); | |
1007 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, | |
1008 | ep->hwtid)); | |
ba6d3925 SW |
1009 | req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | |
1010 | F_RX_DACK_CHANGE | | |
1011 | V_RX_DACK_MODE(dack_mode)); | |
d4f1a5c6 | 1012 | set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); |
cfdda9d7 SW |
1013 | c4iw_ofld_send(&ep->com.dev->rdev, skb); |
1014 | return credits; | |
1015 | } | |
1016 | ||
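| /* | |
| * Process the peer's accumulated MPA start reply: validate the header, | |
| * negotiate the MPA v1/v2 attributes (CRC, markers, IRD/ORD, p2p RTR | |
| * type), move the QP to RTS, and send a TERM on an RTR or IRD mismatch. | |
| */ | |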
1017 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |
1018 | { | |
1019 | struct mpa_message *mpa; | |
d2fe99e8 | 1020 | struct mpa_v2_conn_params *mpa_v2_params; |
cfdda9d7 | 1021 | u16 plen; |
d2fe99e8 KS |
1022 | u16 resp_ird, resp_ord; |
1023 | u8 rtr_mismatch = 0, insuff_ird = 0; | |
cfdda9d7 SW |
1024 | struct c4iw_qp_attributes attrs; |
1025 | enum c4iw_qp_attr_mask mask; | |
1026 | int err; | |
1027 | ||
1028 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1029 | ||
1030 | /* | |
1031 | * Stop mpa timer. If it expired, then the state has | |
1032 | * changed and we bail since ep_timeout already aborted | |
1033 | * the connection. | |
1034 | */ | |
1035 | stop_ep_timer(ep); | |
1036 | if (state_read(&ep->com) != MPA_REQ_SENT) | |
1037 | return; | |
1038 | ||
1039 | /* | |
1040 | * If we get more than the supported amount of private data | |
1041 | * then we must fail this connection. | |
1042 | */ | |
1043 | if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { | |
1044 | err = -EINVAL; | |
1045 | goto err; | |
1046 | } | |
1047 | ||
1048 | /* | |
1049 | * copy the new data into our accumulation buffer. | |
1050 | */ | |
1051 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), | |
1052 | skb->len); | |
1053 | ep->mpa_pkt_len += skb->len; | |
1054 | ||
1055 | /* | |
1056 | * if we don't even have the mpa message, then bail. | |
1057 | */ | |
1058 | if (ep->mpa_pkt_len < sizeof(*mpa)) | |
1059 | return; | |
1060 | mpa = (struct mpa_message *) ep->mpa_pkt; | |
1061 | ||
1062 | /* Validate MPA header. */ | |
d2fe99e8 KS |
1063 | if (mpa->revision > mpa_rev) { |
1064 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | |
1065 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | |
cfdda9d7 SW |
1066 | err = -EPROTO; |
1067 | goto err; | |
1068 | } | |
1069 | if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { | |
1070 | err = -EPROTO; | |
1071 | goto err; | |
1072 | } | |
1073 | ||
1074 | plen = ntohs(mpa->private_data_size); | |
1075 | ||
1076 | /* | |
1077 | * Fail if there's too much private data. | |
1078 | */ | |
1079 | if (plen > MPA_MAX_PRIVATE_DATA) { | |
1080 | err = -EPROTO; | |
1081 | goto err; | |
1082 | } | |
1083 | ||
1084 | /* | |
1085 | * Fail if plen does not account for the packet size. | |
1086 | */ | |
1087 | if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { | |
1088 | err = -EPROTO; | |
1089 | goto err; | |
1090 | } | |
1091 | ||
1092 | ep->plen = (u8) plen; | |
1093 | ||
1094 | /* | |
1095 | * If we don't have all the pdata yet, then bail. | |
1096 | * We'll continue processing when more data arrives. | |
1097 | */ | |
1098 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) | |
1099 | return; | |
1100 | ||
1101 | if (mpa->flags & MPA_REJECT) { | |
1102 | err = -ECONNREFUSED; | |
1103 | goto err; | |
1104 | } | |
1105 | ||
1106 | /* | |
1107 | * If we get here we have accumulated the entire mpa | |
1108 | * start reply message including private data. And | |
1109 | * the MPA header is valid. | |
1110 | */ | |
1111 | state_set(&ep->com, FPDU_MODE); | |
1112 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | |
1113 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | |
1114 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | |
d2fe99e8 KS |
1115 | ep->mpa_attr.version = mpa->revision; |
1116 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | |
1117 | ||
1118 | if (mpa->revision == 2) { | |
1119 | ep->mpa_attr.enhanced_rdma_conn = | |
1120 | mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; | |
1121 | if (ep->mpa_attr.enhanced_rdma_conn) { | |
1122 | mpa_v2_params = (struct mpa_v2_conn_params *) | |
1123 | (ep->mpa_pkt + sizeof(*mpa)); | |
1124 | resp_ird = ntohs(mpa_v2_params->ird) & | |
1125 | MPA_V2_IRD_ORD_MASK; | |
1126 | resp_ord = ntohs(mpa_v2_params->ord) & | |
1127 | MPA_V2_IRD_ORD_MASK; | |
1128 | ||
1129 | /* | |
1130 | * This is a double-check. Ideally these checks are not | |
1131 | * required, since the ird/ord negotiation has already been | |
1132 | * handled in c4iw_accept_cr. | |
1133 | */ | |
1134 | if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { | |
1135 | err = -ENOMEM; | |
1136 | ep->ird = resp_ord; | |
1137 | ep->ord = resp_ird; | |
1138 | insuff_ird = 1; | |
1139 | } | |
1140 | ||
1141 | if (ntohs(mpa_v2_params->ird) & | |
1142 | MPA_V2_PEER2PEER_MODEL) { | |
1143 | if (ntohs(mpa_v2_params->ord) & | |
1144 | MPA_V2_RDMA_WRITE_RTR) | |
1145 | ep->mpa_attr.p2p_type = | |
1146 | FW_RI_INIT_P2PTYPE_RDMA_WRITE; | |
1147 | else if (ntohs(mpa_v2_params->ord) & | |
1148 | MPA_V2_RDMA_READ_RTR) | |
1149 | ep->mpa_attr.p2p_type = | |
1150 | FW_RI_INIT_P2PTYPE_READ_REQ; | |
1151 | } | |
1152 | } | |
1153 | } else if (mpa->revision == 1) | |
1154 | if (peer2peer) | |
1155 | ep->mpa_attr.p2p_type = p2p_type; | |
1156 | ||
cfdda9d7 | 1157 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " |
d2fe99e8 KS |
1158 | "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " |
1159 | "%d\n", __func__, ep->mpa_attr.crc_enabled, | |
1160 | ep->mpa_attr.recv_marker_enabled, | |
1161 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | |
1162 | ep->mpa_attr.p2p_type, p2p_type); | |
1163 | ||
1164 | /* | |
1165 | * If the responder's RTR does not match that of the initiator, assign | |
1166 | * FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that no RTR is | |
1167 | * generated when moving the QP to RTS state. | |
1168 | * A TERM message will be sent after the QP has moved to RTS state. | |
1169 | */ | |
91018f86 | 1170 | if ((ep->mpa_attr.version == 2) && peer2peer && |
d2fe99e8 KS |
1171 | (ep->mpa_attr.p2p_type != p2p_type)) { |
1172 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | |
1173 | rtr_mismatch = 1; | |
1174 | } | |
cfdda9d7 SW |
1175 | |
1176 | attrs.mpa_attr = ep->mpa_attr; | |
1177 | attrs.max_ird = ep->ird; | |
1178 | attrs.max_ord = ep->ord; | |
1179 | attrs.llp_stream_handle = ep; | |
1180 | attrs.next_state = C4IW_QP_STATE_RTS; | |
1181 | ||
1182 | mask = C4IW_QP_ATTR_NEXT_STATE | | |
1183 | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | | |
1184 | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; | |
1185 | ||
1186 | /* bind QP and TID with INIT_WR */ | |
1187 | err = c4iw_modify_qp(ep->com.qp->rhp, | |
1188 | ep->com.qp, mask, &attrs, 1); | |
1189 | if (err) | |
1190 | goto err; | |
d2fe99e8 KS |
1191 | |
1192 | /* | |
1193 | * If responder's RTR requirement did not match with what initiator | |
1194 | * supports, generate TERM message | |
1195 | */ | |
1196 | if (rtr_mismatch) { | |
1197 | printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); | |
1198 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | |
1199 | attrs.ecode = MPA_NOMATCH_RTR; | |
1200 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | |
1201 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
1202 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | |
1203 | err = -ENOMEM; | |
1204 | goto out; | |
1205 | } | |
1206 | ||
1207 | /* | |
1208 | * Generate TERM if initiator IRD is not sufficient for responder | |
1209 | * provided ORD. Currently, we do the same behaviour even when | |
1210 | * responder provided IRD is also not sufficient as regards to | |
1211 | * initiator ORD. | |
1212 | */ | |
1213 | if (insuff_ird) { | |
1214 | printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", | |
1215 | __func__); | |
1216 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | |
1217 | attrs.ecode = MPA_INSUFF_IRD; | |
1218 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | |
1219 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
1220 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | |
1221 | err = -ENOMEM; | |
1222 | goto out; | |
1223 | } | |
cfdda9d7 SW |
1224 | goto out; |
1225 | err: | |
b21ef16a SW |
1226 | state_set(&ep->com, ABORTING); |
1227 | send_abort(ep, skb, GFP_KERNEL); | |
cfdda9d7 SW |
1228 | out: |
1229 | connect_reply_upcall(ep, err); | |
1230 | return; | |
1231 | } | |
1232 | ||
1233 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |
1234 | { | |
1235 | struct mpa_message *mpa; | |
d2fe99e8 | 1236 | struct mpa_v2_conn_params *mpa_v2_params; |
cfdda9d7 SW |
1237 | u16 plen; |
1238 | ||
1239 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1240 | ||
1241 | if (state_read(&ep->com) != MPA_REQ_WAIT) | |
1242 | return; | |
1243 | ||
1244 | /* | |
1245 | * If we get more than the supported amount of private data | |
1246 | * then we must fail this connection. | |
1247 | */ | |
1248 | if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { | |
1249 | stop_ep_timer(ep); | |
1250 | abort_connection(ep, skb, GFP_KERNEL); | |
1251 | return; | |
1252 | } | |
1253 | ||
1254 | PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); | |
1255 | ||
1256 | /* | |
1257 | * Copy the new data into our accumulation buffer. | |
1258 | */ | |
1259 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), | |
1260 | skb->len); | |
1261 | ep->mpa_pkt_len += skb->len; | |
1262 | ||
1263 | /* | |
1264 | * If we don't even have the mpa message, then bail. | |
1265 | * We'll continue processing when more data arrives. | |
1266 | */ | |
1267 | if (ep->mpa_pkt_len < sizeof(*mpa)) | |
1268 | return; | |
1269 | ||
1270 | PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); | |
1271 | stop_ep_timer(ep); | |
1272 | mpa = (struct mpa_message *) ep->mpa_pkt; | |
1273 | ||
1274 | /* | |
1275 | * Validate MPA Header. | |
1276 | */ | |
d2fe99e8 KS |
1277 | if (mpa->revision > mpa_rev) { |
1278 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | |
1279 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | |
cfdda9d7 SW |
1280 | abort_connection(ep, skb, GFP_KERNEL); |
1281 | return; | |
1282 | } | |
1283 | ||
1284 | if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { | |
1285 | abort_connection(ep, skb, GFP_KERNEL); | |
1286 | return; | |
1287 | } | |
1288 | ||
1289 | plen = ntohs(mpa->private_data_size); | |
1290 | ||
1291 | /* | |
1292 | * Fail if there's too much private data. | |
1293 | */ | |
1294 | if (plen > MPA_MAX_PRIVATE_DATA) { | |
1295 | abort_connection(ep, skb, GFP_KERNEL); | |
1296 | return; | |
1297 | } | |
1298 | ||
1299 | /* | |
1300 | * Fail if plen does not account for the packet size. | |
1301 | */ | |
1302 | if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { | |
1303 | abort_connection(ep, skb, GFP_KERNEL); | |
1304 | return; | |
1305 | } | |
1306 | ep->plen = (u8) plen; | |
1307 | ||
1308 | /* | |
1309 | * If we don't have all the pdata yet, then bail. | |
1310 | */ | |
1311 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) | |
1312 | return; | |
1313 | ||
1314 | /* | |
1315 | * If we get here we have accumulated the entire mpa | |
1316 | * start request message including private data. | |
1317 | */ | |
1318 | ep->mpa_attr.initiator = 0; | |
1319 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | |
1320 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | |
1321 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | |
d2fe99e8 KS |
1322 | ep->mpa_attr.version = mpa->revision; |
1323 | if (mpa->revision == 1) | |
1324 | ep->tried_with_mpa_v1 = 1; | |
1325 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | |
1326 | ||
1327 | if (mpa->revision == 2) { | |
1328 | ep->mpa_attr.enhanced_rdma_conn = | |
1329 | mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; | |
1330 | if (ep->mpa_attr.enhanced_rdma_conn) { | |
1331 | mpa_v2_params = (struct mpa_v2_conn_params *) | |
1332 | (ep->mpa_pkt + sizeof(*mpa)); | |
1333 | ep->ird = ntohs(mpa_v2_params->ird) & | |
1334 | MPA_V2_IRD_ORD_MASK; | |
1335 | ep->ord = ntohs(mpa_v2_params->ord) & | |
1336 | MPA_V2_IRD_ORD_MASK; | |
1337 | if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) | |
1338 | if (peer2peer) { | |
1339 | if (ntohs(mpa_v2_params->ord) & | |
1340 | MPA_V2_RDMA_WRITE_RTR) | |
1341 | ep->mpa_attr.p2p_type = | |
1342 | FW_RI_INIT_P2PTYPE_RDMA_WRITE; | |
1343 | else if (ntohs(mpa_v2_params->ord) & | |
1344 | MPA_V2_RDMA_READ_RTR) | |
1345 | ep->mpa_attr.p2p_type = | |
1346 | FW_RI_INIT_P2PTYPE_READ_REQ; | |
1347 | } | |
1348 | } | |
1349 | } else if (mpa->revision == 1) | |
1350 | if (peer2peer) | |
1351 | ep->mpa_attr.p2p_type = p2p_type; | |
1352 | ||
cfdda9d7 SW |
1353 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " |
1354 | "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, | |
1355 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, | |
1356 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | |
1357 | ep->mpa_attr.p2p_type); | |
1358 | ||
1359 | state_set(&ep->com, MPA_REQ_RCVD); | |
1360 | ||
1361 | /* drive upcall */ | |
1362 | connect_request_upcall(ep); | |
1363 | return; | |
1364 | } | |
1365 | ||
1366 | static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |
1367 | { | |
1368 | struct c4iw_ep *ep; | |
1369 | struct cpl_rx_data *hdr = cplhdr(skb); | |
1370 | unsigned int dlen = ntohs(hdr->len); | |
1371 | unsigned int tid = GET_TID(hdr); | |
1372 | struct tid_info *t = dev->rdev.lldi.tids; | |
1373 | ||
1374 | ep = lookup_tid(t, tid); | |
1375 | PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); | |
1376 | skb_pull(skb, sizeof(*hdr)); | |
1377 | skb_trim(skb, dlen); | |
1378 | ||
1379 | ep->rcv_seq += dlen; | |
1380 | BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen)); | |
1381 | ||
1382 | /* update RX credits */ | |
1383 | update_rx_credits(ep, dlen); | |
1384 | ||
1385 | switch (state_read(&ep->com)) { | |
1386 | case MPA_REQ_SENT: | |
1387 | process_mpa_reply(ep, skb); | |
1388 | break; | |
1389 | case MPA_REQ_WAIT: | |
1390 | process_mpa_request(ep, skb); | |
1391 | break; | |
1392 | case MPA_REP_SENT: | |
1393 | break; | |
1394 | default: | |
1395 | printk(KERN_ERR MOD "%s Unexpected streaming data." | |
1396 | " ep %p state %d tid %u\n", | |
1397 | __func__, ep, state_read(&ep->com), ep->hwtid); | |
1398 | ||
1399 | /* | |
1400 | * The ep will time out and inform the ULP of the failure. | |
1401 | * See ep_timeout(). | |
1402 | */ | |
1403 | break; | |
1404 | } | |
1405 | return 0; | |
1406 | } | |
1407 | ||
1408 | static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1409 | { | |
1410 | struct c4iw_ep *ep; | |
1411 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb); | |
cfdda9d7 SW |
1412 | int release = 0; |
1413 | unsigned int tid = GET_TID(rpl); | |
1414 | struct tid_info *t = dev->rdev.lldi.tids; | |
1415 | ||
1416 | ep = lookup_tid(t, tid); | |
4984037b VP |
1417 | if (!ep) { |
1418 | printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); | |
1419 | return 0; | |
1420 | } | |
92dd6c3d | 1421 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
2f5b48c3 | 1422 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
1423 | switch (ep->com.state) { |
1424 | case ABORTING: | |
1425 | __state_set(&ep->com, DEAD); | |
1426 | release = 1; | |
1427 | break; | |
1428 | default: | |
1429 | printk(KERN_ERR "%s ep %p state %d\n", | |
1430 | __func__, ep, ep->com.state); | |
1431 | break; | |
1432 | } | |
2f5b48c3 | 1433 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1434 | |
1435 | if (release) | |
1436 | release_ep_resources(ep); | |
1437 | return 0; | |
1438 | } | |
1439 | ||
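| /* | |
| * Retry an active open by handing the connection to the firmware via a | |
| * FW_OFLD_CONNECTION_WR; used when CPL_ACT_OPEN_REQ fails with a full | |
| * TCAM. | |
| */ | |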
5be78ee9 VP |
1440 | static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) |
1441 | { | |
1442 | struct sk_buff *skb; | |
1443 | struct fw_ofld_connection_wr *req; | |
1444 | unsigned int mtu_idx; | |
1445 | int wscale; | |
1446 | ||
1447 | skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); | |
1448 | req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); | |
1449 | memset(req, 0, sizeof(*req)); | |
1450 | req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); | |
1451 | req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); | |
1452 | req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, | |
1453 | ep->l2t)); | |
1454 | req->le.lport = ep->com.local_addr.sin_port; | |
1455 | req->le.pport = ep->com.remote_addr.sin_port; | |
1456 | req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr; | |
1457 | req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr; | |
1458 | req->tcb.t_state_to_astid = | |
1459 | htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) | | |
1460 | V_FW_OFLD_CONNECTION_WR_ASTID(atid)); | |
1461 | req->tcb.cplrxdataack_cplpassacceptrpl = | |
1462 | htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); | |
1463 | req->tcb.tx_max = jiffies; | |
1464 | cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); | |
1465 | wscale = compute_wscale(rcv_win); | |
1466 | req->tcb.opt0 = TCAM_BYPASS(1) | | |
1467 | (nocong ? NO_CONG(1) : 0) | | |
1468 | KEEP_ALIVE(1) | | |
1469 | DELACK(1) | | |
1470 | WND_SCALE(wscale) | | |
1471 | MSS_IDX(mtu_idx) | | |
1472 | L2T_IDX(ep->l2t->idx) | | |
1473 | TX_CHAN(ep->tx_chan) | | |
1474 | SMAC_SEL(ep->smac_idx) | | |
1475 | DSCP(ep->tos) | | |
1476 | ULP_MODE(ULP_MODE_TCPDDP) | | |
1477 | RCV_BUFSIZ(rcv_win >> 10); | |
1478 | req->tcb.opt2 = PACE(1) | | |
1479 | TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | | |
1480 | RX_CHANNEL(0) | | |
1481 | CCTRL_ECN(enable_ecn) | | |
1482 | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); | |
1483 | if (enable_tcp_timestamps) | |
1484 | req->tcb.opt2 |= TSTAMPS_EN(1); | |
1485 | if (enable_tcp_sack) | |
1486 | req->tcb.opt2 |= SACK_EN(1); | |
1487 | if (wscale && enable_tcp_window_scaling) | |
1488 | req->tcb.opt2 |= WND_SCALE_EN(1); | |
1489 | req->tcb.opt0 = cpu_to_be64(req->tcb.opt0); | |
1490 | req->tcb.opt2 = cpu_to_be32(req->tcb.opt2); | |
1491 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); | |
1492 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
1493 | } | |
1494 | ||
cfdda9d7 SW |
1495 | /* |
1496 | * Return whether a failed active open has allocated a TID | |
1497 | */ | |
1498 | static inline int act_open_has_tid(int status) | |
1499 | { | |
1500 | return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && | |
1501 | status != CPL_ERR_ARP_MISS; | |
1502 | } | |
1503 | ||
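/*
 * Handle CPL_ACT_OPEN_RPL for an active open.  Negative advice is
 * ignored, a TCAM-full status triggers a retry through
 * send_fw_act_open_req(), and any other failure is reported to the ULP
 * before the atid, route and L2T entry are released.
 */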
1504 | static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1505 | { | |
1506 | struct c4iw_ep *ep; | |
1507 | struct cpl_act_open_rpl *rpl = cplhdr(skb); | |
1508 | unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( | |
1509 | ntohl(rpl->atid_status))); | |
1510 | struct tid_info *t = dev->rdev.lldi.tids; | |
1511 | int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); | |
1512 | ||
1513 | ep = lookup_atid(t, atid); | |
1514 | ||
1515 | PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, | |
1516 | status, status2errno(status)); | |
1517 | ||
1518 | if (status == CPL_ERR_RTX_NEG_ADVICE) { | |
1519 | printk(KERN_WARNING MOD "Connection problems for atid %u\n", | |
1520 | atid); | |
1521 | return 0; | |
1522 | } | |
1523 | ||
d716a2a0 VP |
1524 | /* |
1525 | * Log interesting failures. | |
1526 | */ | |
1527 | switch (status) { | |
1528 | case CPL_ERR_CONN_RESET: | |
1529 | case CPL_ERR_CONN_TIMEDOUT: | |
1530 | break; | |
5be78ee9 VP |
1531 | case CPL_ERR_TCAM_FULL: |
1532 | mutex_lock(&dev->rdev.stats.lock); | |
1533 | dev->rdev.stats.tcam_full++; | |
1534 | mutex_unlock(&dev->rdev.stats.lock); | |
1535 | send_fw_act_open_req(ep, | |
1536 | GET_TID_TID(GET_AOPEN_ATID(ntohl(rpl->atid_status)))); | |
1537 | return 0; | |
d716a2a0 VP |
1539 | default: |
1540 | printk(KERN_INFO MOD "Active open failure - " | |
1541 | "atid %u status %u errno %d %pI4:%u->%pI4:%u\n", | |
1542 | atid, status, status2errno(status), | |
1543 | &ep->com.local_addr.sin_addr.s_addr, | |
1544 | ntohs(ep->com.local_addr.sin_port), | |
1545 | &ep->com.remote_addr.sin_addr.s_addr, | |
1546 | ntohs(ep->com.remote_addr.sin_port)); | |
1547 | break; | |
1548 | } | |
1549 | ||
cfdda9d7 SW |
1550 | connect_reply_upcall(ep, status2errno(status)); |
1551 | state_set(&ep->com, DEAD); | |
1552 | ||
1553 | if (status && act_open_has_tid(status)) | |
1554 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); | |
1555 | ||
1556 | cxgb4_free_atid(t, atid); | |
1557 | dst_release(ep->dst); | |
1558 | cxgb4_l2t_release(ep->l2t); | |
1559 | c4iw_put_ep(&ep->com); | |
1560 | ||
1561 | return 0; | |
1562 | } | |
1563 | ||
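/*
 * Completion for cxgb4_create_server(): wake up the thread blocked in
 * c4iw_create_listen() with the CPL_PASS_OPEN_RPL status.
 */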
1564 | static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1565 | { | |
1566 | struct cpl_pass_open_rpl *rpl = cplhdr(skb); | |
1567 | struct tid_info *t = dev->rdev.lldi.tids; | |
1568 | unsigned int stid = GET_TID(rpl); | |
1569 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | |
1570 | ||
1571 | if (!ep) { | |
1572 | printk(KERN_ERR MOD "stid %d lookup failure!\n", stid); | |
1573 | return 0; | |
1574 | } | |
1575 | PDBG("%s ep %p status %d error %d\n", __func__, ep, | |
1576 | rpl->status, status2errno(rpl->status)); | |
d9594d99 | 1577 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
cfdda9d7 SW |
1578 | |
1579 | return 0; | |
1580 | } | |
1581 | ||
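/*
 * Ask the hardware to stop listening on this server TID by sending a
 * CPL_CLOSE_LISTSRV_REQ; the reply is handled by close_listsrv_rpl().
 */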
1582 | static int listen_stop(struct c4iw_listen_ep *ep) | |
1583 | { | |
1584 | struct sk_buff *skb; | |
1585 | struct cpl_close_listsvr_req *req; | |
1586 | ||
1587 | PDBG("%s ep %p\n", __func__, ep); | |
1588 | skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); | |
1589 | if (!skb) { | |
1590 | printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); | |
1591 | return -ENOMEM; | |
1592 | } | |
1593 | req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); | |
1594 | INIT_TP_WR(req, 0); | |
1595 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, | |
1596 | ep->stid)); | |
1597 | req->reply_ctrl = cpu_to_be16( | |
1598 | QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); | |
1599 | set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); | |
1600 | return c4iw_ofld_send(&ep->com.dev->rdev, skb); | |
1601 | } | |
1602 | ||
1603 | static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1604 | { | |
1605 | struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); | |
1606 | struct tid_info *t = dev->rdev.lldi.tids; | |
1607 | unsigned int stid = GET_TID(rpl); | |
1608 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | |
1609 | ||
1610 | PDBG("%s ep %p\n", __func__, ep); | |
d9594d99 | 1611 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
cfdda9d7 SW |
1612 | return 0; |
1613 | } | |
1614 | ||
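/*
 * Send the CPL_PASS_ACCEPT_RPL that accepts an incoming connection
 * request.  The request skb is trimmed and reused for the reply, and
 * opt0/opt2 encode the negotiated TCP options (MSS index, window
 * scaling, timestamps, SACK, ECN) before the reply is queued on the
 * endpoint's L2T entry.
 */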
1615 | static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, | |
1616 | struct cpl_pass_accept_req *req) | |
1617 | { | |
1618 | struct cpl_pass_accept_rpl *rpl; | |
1619 | unsigned int mtu_idx; | |
1620 | u64 opt0; | |
1621 | u32 opt2; | |
1622 | int wscale; | |
1623 | ||
1624 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1625 | BUG_ON(skb_cloned(skb)); | |
1626 | skb_trim(skb, sizeof(*rpl)); | |
1627 | skb_get(skb); | |
1628 | cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); | |
1629 | wscale = compute_wscale(rcv_win); | |
5be78ee9 VP |
1630 | opt0 = (nocong ? NO_CONG(1) : 0) | |
1631 | KEEP_ALIVE(1) | | |
ba6d3925 | 1632 | DELACK(1) | |
cfdda9d7 SW |
1633 | WND_SCALE(wscale) | |
1634 | MSS_IDX(mtu_idx) | | |
1635 | L2T_IDX(ep->l2t->idx) | | |
1636 | TX_CHAN(ep->tx_chan) | | |
1637 | SMAC_SEL(ep->smac_idx) | | |
5be78ee9 | 1638 | DSCP(ep->tos >> 2) | |
b48f3b9c | 1639 | ULP_MODE(ULP_MODE_TCPDDP) | |
cfdda9d7 SW |
1640 | RCV_BUFSIZ(rcv_win>>10); |
1641 | opt2 = RX_CHANNEL(0) | | |
1642 | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); | |
1643 | ||
1644 | if (enable_tcp_timestamps && req->tcpopt.tstamp) | |
1645 | opt2 |= TSTAMPS_EN(1); | |
1646 | if (enable_tcp_sack && req->tcpopt.sack) | |
1647 | opt2 |= SACK_EN(1); | |
1648 | if (wscale && enable_tcp_window_scaling) | |
1649 | opt2 |= WND_SCALE_EN(1); | |
5be78ee9 VP |
1650 | if (enable_ecn) { |
1651 | const struct tcphdr *tcph; | |
1652 | u32 hlen = ntohl(req->hdr_len); | |
1653 | ||
1654 | tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + | |
1655 | G_IP_HDR_LEN(hlen); | |
1656 | if (tcph->ece && tcph->cwr) | |
1657 | opt2 |= CCTRL_ECN(1); | |
1658 | } | |
cfdda9d7 SW |
1659 | |
1660 | rpl = cplhdr(skb); | |
1661 | INIT_TP_WR(rpl, ep->hwtid); | |
1662 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, | |
1663 | ep->hwtid)); | |
1664 | rpl->opt0 = cpu_to_be64(opt0); | |
1665 | rpl->opt2 = cpu_to_be32(opt2); | |
d4f1a5c6 | 1666 | set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); |
cfdda9d7 SW |
1667 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
1668 | ||
1669 | return; | |
1670 | } | |
1671 | ||
1672 | static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, | |
1673 | struct sk_buff *skb) | |
1674 | { | |
1675 | PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, | |
1676 | peer_ip); | |
1677 | BUG_ON(skb_cloned(skb)); | |
1678 | skb_trim(skb, sizeof(struct cpl_tid_release)); | |
1679 | skb_get(skb); | |
1680 | release_tid(&dev->rdev, hwtid, skb); | |
1681 | return; | |
1682 | } | |
1683 | ||
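/*
 * Pull the IPv4/TCP 4-tuple out of the Ethernet/IP/TCP headers that
 * follow the CPL_PASS_ACCEPT_REQ message.
 */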
1684 | static void get_4tuple(struct cpl_pass_accept_req *req, | |
1685 | __be32 *local_ip, __be32 *peer_ip, | |
1686 | __be16 *local_port, __be16 *peer_port) | |
1687 | { | |
1688 | int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); | |
1689 | int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); | |
1690 | struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); | |
1691 | struct tcphdr *tcp = (struct tcphdr *) | |
1692 | ((u8 *)(req + 1) + eth_len + ip_len); | |
1693 | ||
1694 | PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, | |
1695 | ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), | |
1696 | ntohs(tcp->dest)); | |
1697 | ||
1698 | *peer_ip = ip->saddr; | |
1699 | *local_ip = ip->daddr; | |
1700 | *peer_port = tcp->source; | |
1701 | *local_port = tcp->dest; | |
1702 | ||
1703 | return; | |
1704 | } | |
1705 | ||
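/*
 * Resolve the neighbour for the destination, allocate an L2T entry and
 * fill in the per-port parameters (MTU, tx channel, SMAC index, txq/rxq
 * indices, RSS queue) for this endpoint.  Loopback destinations take
 * their parameters from the local net_device found via ip_dev_find().
 */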
3786cf18 DM |
1706 | static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, |
1707 | struct c4iw_dev *cdev, bool clear_mpa_v1) | |
1708 | { | |
1709 | struct neighbour *n; | |
1710 | int err, step; | |
1711 | ||
64b7007e | 1712 | n = dst_neigh_lookup(dst, &peer_ip); |
3786cf18 | 1713 | if (!n) |
64b7007e DM |
1714 | return -ENODEV; |
1715 | ||
1716 | rcu_read_lock(); | |
3786cf18 DM |
1717 | err = -ENOMEM; |
1718 | if (n->dev->flags & IFF_LOOPBACK) { | |
1719 | struct net_device *pdev; | |
1720 | ||
1721 | pdev = ip_dev_find(&init_net, peer_ip); | |
71b43fd5 TLSC |
1722 | if (!pdev) { |
1723 | err = -ENODEV; | |
1724 | goto out; | |
1725 | } | |
3786cf18 DM |
1726 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, |
1727 | n, pdev, 0); | |
1728 | if (!ep->l2t) | |
1729 | goto out; | |
1730 | ep->mtu = pdev->mtu; | |
1731 | ep->tx_chan = cxgb4_port_chan(pdev); | |
1732 | ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; | |
1733 | step = cdev->rdev.lldi.ntxq / | |
1734 | cdev->rdev.lldi.nchan; | |
1735 | ep->txq_idx = cxgb4_port_idx(pdev) * step; | |
1736 | step = cdev->rdev.lldi.nrxq / | |
1737 | cdev->rdev.lldi.nchan; | |
1738 | ep->ctrlq_idx = cxgb4_port_idx(pdev); | |
1739 | ep->rss_qid = cdev->rdev.lldi.rxq_ids[ | |
1740 | cxgb4_port_idx(pdev) * step]; | |
1741 | dev_put(pdev); | |
1742 | } else { | |
1743 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, | |
1744 | n, n->dev, 0); | |
1745 | if (!ep->l2t) | |
1746 | goto out; | |
bd61baaf | 1747 | ep->mtu = dst_mtu(dst); |
3786cf18 DM |
1748 | ep->tx_chan = cxgb4_port_chan(n->dev); |
1749 | ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1; | |
1750 | step = cdev->rdev.lldi.ntxq / | |
1751 | cdev->rdev.lldi.nchan; | |
1752 | ep->txq_idx = cxgb4_port_idx(n->dev) * step; | |
1753 | ep->ctrlq_idx = cxgb4_port_idx(n->dev); | |
1754 | step = cdev->rdev.lldi.nrxq / | |
1755 | cdev->rdev.lldi.nchan; | |
1756 | ep->rss_qid = cdev->rdev.lldi.rxq_ids[ | |
1757 | cxgb4_port_idx(n->dev) * step]; | |
1758 | ||
1759 | if (clear_mpa_v1) { | |
1760 | ep->retry_with_mpa_v1 = 0; | |
1761 | ep->tried_with_mpa_v1 = 0; | |
1762 | } | |
1763 | } | |
1764 | err = 0; | |
1765 | out: | |
1766 | rcu_read_unlock(); | |
1767 | ||
64b7007e DM |
1768 | neigh_release(n); |
1769 | ||
3786cf18 DM |
1770 | return err; |
1771 | } | |
1772 | ||
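/*
 * Handle an incoming CPL_PASS_ACCEPT_REQ on a listening endpoint:
 * validate the listener state, find a route back to the peer, allocate
 * and populate a child endpoint, insert its hardware TID and accept the
 * connection; any failure rejects the request by releasing the TID.
 */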
cfdda9d7 SW |
1773 | static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) |
1774 | { | |
1775 | struct c4iw_ep *child_ep, *parent_ep; | |
1776 | struct cpl_pass_accept_req *req = cplhdr(skb); | |
1777 | unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); | |
1778 | struct tid_info *t = dev->rdev.lldi.tids; | |
1779 | unsigned int hwtid = GET_TID(req); | |
1780 | struct dst_entry *dst; | |
cfdda9d7 SW |
1781 | struct rtable *rt; |
1782 | __be32 local_ip, peer_ip; | |
1783 | __be16 local_port, peer_port; | |
3786cf18 | 1784 | int err; |
cfdda9d7 SW |
1785 | |
1786 | parent_ep = lookup_stid(t, stid); | |
1787 | PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); | |
1788 | ||
1789 | get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); | |
1790 | ||
1791 | if (state_read(&parent_ep->com) != LISTEN) { | |
1792 | printk(KERN_ERR "%s - listening ep not in LISTEN\n", | |
1793 | __func__); | |
1794 | goto reject; | |
1795 | } | |
1796 | ||
1797 | /* Find output route */ | |
1798 | rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, | |
1799 | GET_POPEN_TOS(ntohl(req->tos_stid))); | |
1800 | if (!rt) { | |
1801 | printk(KERN_ERR MOD "%s - failed to find dst entry!\n", | |
1802 | __func__); | |
1803 | goto reject; | |
1804 | } | |
d8d1f30b | 1805 | dst = &rt->dst; |
3786cf18 DM |
1806 | |
1807 | child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); | |
1808 | if (!child_ep) { | |
1809 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", | |
cfdda9d7 SW |
1810 | __func__); |
1811 | dst_release(dst); | |
1812 | goto reject; | |
1813 | } | |
1814 | ||
3786cf18 DM |
1815 | err = import_ep(child_ep, peer_ip, dst, dev, false); |
1816 | if (err) { | |
1817 | printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", | |
cfdda9d7 | 1818 | __func__); |
cfdda9d7 | 1819 | dst_release(dst); |
3786cf18 | 1820 | kfree(child_ep); |
cfdda9d7 SW |
1821 | goto reject; |
1822 | } | |
3786cf18 | 1823 | |
cfdda9d7 SW |
1824 | state_set(&child_ep->com, CONNECTING); |
1825 | child_ep->com.dev = dev; | |
1826 | child_ep->com.cm_id = NULL; | |
1827 | child_ep->com.local_addr.sin_family = PF_INET; | |
1828 | child_ep->com.local_addr.sin_port = local_port; | |
1829 | child_ep->com.local_addr.sin_addr.s_addr = local_ip; | |
1830 | child_ep->com.remote_addr.sin_family = PF_INET; | |
1831 | child_ep->com.remote_addr.sin_port = peer_port; | |
1832 | child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; | |
1833 | c4iw_get_ep(&parent_ep->com); | |
1834 | child_ep->parent_ep = parent_ep; | |
1835 | child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); | |
cfdda9d7 SW |
1836 | child_ep->dst = dst; |
1837 | child_ep->hwtid = hwtid; | |
cfdda9d7 SW |
1838 | |
1839 | PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, | |
3786cf18 | 1840 | child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); |
cfdda9d7 SW |
1841 | |
1842 | init_timer(&child_ep->timer); | |
1843 | cxgb4_insert_tid(t, child_ep, hwtid); | |
1844 | accept_cr(child_ep, peer_ip, skb, req); | |
1845 | goto out; | |
1846 | reject: | |
1847 | reject_cr(dev, hwtid, peer_ip, skb); | |
1848 | out: | |
1849 | return 0; | |
1850 | } | |
1851 | ||
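/*
 * The passive-open TCP handshake has completed.  Record the initial
 * send/receive sequence numbers and the emss, move to MPA_REQ_WAIT,
 * start the endpoint timer and send the flow-control (FLOWC) work
 * request.
 */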
1852 | static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |
1853 | { | |
1854 | struct c4iw_ep *ep; | |
1855 | struct cpl_pass_establish *req = cplhdr(skb); | |
1856 | struct tid_info *t = dev->rdev.lldi.tids; | |
1857 | unsigned int tid = GET_TID(req); | |
1858 | ||
1859 | ep = lookup_tid(t, tid); | |
1860 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1861 | ep->snd_seq = be32_to_cpu(req->snd_isn); | |
1862 | ep->rcv_seq = be32_to_cpu(req->rcv_isn); | |
1863 | ||
1864 | set_emss(ep, ntohs(req->tcp_opt)); | |
1865 | ||
1866 | dst_confirm(ep->dst); | |
1867 | state_set(&ep->com, MPA_REQ_WAIT); | |
1868 | start_ep_timer(ep); | |
1869 | send_flowc(ep, skb); | |
1870 | ||
1871 | return 0; | |
1872 | } | |
1873 | ||
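/*
 * The peer has sent a FIN.  Advance the connection state machine under
 * the endpoint mutex, moving the QP towards CLOSING/IDLE as needed, and
 * then either disconnect the endpoint or release its resources
 * depending on the resulting state.
 */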
1874 | static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |
1875 | { | |
1876 | struct cpl_peer_close *hdr = cplhdr(skb); | |
1877 | struct c4iw_ep *ep; | |
1878 | struct c4iw_qp_attributes attrs; | |
cfdda9d7 SW |
1879 | int disconnect = 1; |
1880 | int release = 0; | |
cfdda9d7 SW |
1881 | struct tid_info *t = dev->rdev.lldi.tids; |
1882 | unsigned int tid = GET_TID(hdr); | |
8da7e7a5 | 1883 | int ret; |
cfdda9d7 SW |
1884 | |
1885 | ep = lookup_tid(t, tid); | |
1886 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1887 | dst_confirm(ep->dst); | |
1888 | ||
2f5b48c3 | 1889 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
1890 | switch (ep->com.state) { |
1891 | case MPA_REQ_WAIT: | |
1892 | __state_set(&ep->com, CLOSING); | |
1893 | break; | |
1894 | case MPA_REQ_SENT: | |
1895 | __state_set(&ep->com, CLOSING); | |
1896 | connect_reply_upcall(ep, -ECONNRESET); | |
1897 | break; | |
1898 | case MPA_REQ_RCVD: | |
1899 | ||
1900 | /* | |
1901 | * We're gonna mark this puppy DEAD, but keep | |
1902 | * the reference on it until the ULP accepts or | |
1903 | * rejects the CR. Also wake up anyone waiting | |
1904 | * in rdma connection migration (see c4iw_accept_cr()). | |
1905 | */ | |
1906 | __state_set(&ep->com, CLOSING); | |
cfdda9d7 | 1907 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
d9594d99 | 1908 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
cfdda9d7 SW |
1909 | break; |
1910 | case MPA_REP_SENT: | |
1911 | __state_set(&ep->com, CLOSING); | |
cfdda9d7 | 1912 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
d9594d99 | 1913 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
cfdda9d7 SW |
1914 | break; |
1915 | case FPDU_MODE: | |
ca5a2202 | 1916 | start_ep_timer(ep); |
cfdda9d7 | 1917 | __state_set(&ep->com, CLOSING); |
30c95c2d | 1918 | attrs.next_state = C4IW_QP_STATE_CLOSING; |
8da7e7a5 | 1919 | ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
30c95c2d | 1920 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
8da7e7a5 SW |
1921 | if (ret != -ECONNRESET) { |
1922 | peer_close_upcall(ep); | |
1923 | disconnect = 1; | |
1924 | } | |
cfdda9d7 SW |
1925 | break; |
1926 | case ABORTING: | |
1927 | disconnect = 0; | |
1928 | break; | |
1929 | case CLOSING: | |
1930 | __state_set(&ep->com, MORIBUND); | |
1931 | disconnect = 0; | |
1932 | break; | |
1933 | case MORIBUND: | |
ca5a2202 | 1934 | stop_ep_timer(ep); |
cfdda9d7 SW |
1935 | if (ep->com.cm_id && ep->com.qp) { |
1936 | attrs.next_state = C4IW_QP_STATE_IDLE; | |
1937 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
1938 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | |
1939 | } | |
1940 | close_complete_upcall(ep); | |
1941 | __state_set(&ep->com, DEAD); | |
1942 | release = 1; | |
1943 | disconnect = 0; | |
1944 | break; | |
1945 | case DEAD: | |
1946 | disconnect = 0; | |
1947 | break; | |
1948 | default: | |
1949 | BUG_ON(1); | |
1950 | } | |
2f5b48c3 | 1951 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1952 | if (disconnect) |
1953 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | |
1954 | if (release) | |
1955 | release_ep_resources(ep); | |
1956 | return 0; | |
1957 | } | |
1958 | ||
1959 | /* | |
1960 | * Returns whether an ABORT_REQ_RSS message is negative advice. | |
1961 | */ | |
1962 | static int is_neg_adv_abort(unsigned int status) | |
1963 | { | |
1964 | return status == CPL_ERR_RTX_NEG_ADVICE || | |
1965 | status == CPL_ERR_PERSIST_NEG_ADVICE; | |
1966 | } | |
1967 | ||
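/*
 * Re-drive an active open on an existing endpoint.  This is used when
 * an MPA v2 attempt is aborted by the peer and we retry transparently
 * with MPA v1: allocate a fresh atid, find the route again and resend
 * the connect request, only notifying the upper layer if the retry
 * itself fails.
 */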
d2fe99e8 KS |
1968 | static int c4iw_reconnect(struct c4iw_ep *ep) |
1969 | { | |
d2fe99e8 | 1970 | struct rtable *rt; |
3786cf18 | 1971 | int err = 0; |
d2fe99e8 KS |
1972 | |
1973 | PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); | |
1974 | init_timer(&ep->timer); | |
1975 | ||
1976 | /* | |
1977 | * Allocate an active TID to initiate a TCP connection. | |
1978 | */ | |
1979 | ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); | |
1980 | if (ep->atid == -1) { | |
1981 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | |
1982 | err = -ENOMEM; | |
1983 | goto fail2; | |
1984 | } | |
1985 | ||
1986 | /* find a route */ | |
1987 | rt = find_route(ep->com.dev, | |
1988 | ep->com.cm_id->local_addr.sin_addr.s_addr, | |
1989 | ep->com.cm_id->remote_addr.sin_addr.s_addr, | |
1990 | ep->com.cm_id->local_addr.sin_port, | |
1991 | ep->com.cm_id->remote_addr.sin_port, 0); | |
1992 | if (!rt) { | |
1993 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | |
1994 | err = -EHOSTUNREACH; | |
1995 | goto fail3; | |
1996 | } | |
1997 | ep->dst = &rt->dst; | |
1998 | ||
3786cf18 DM |
1999 | err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr, |
2000 | ep->dst, ep->com.dev, false); | |
2001 | if (err) { | |
d2fe99e8 | 2002 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
d2fe99e8 KS |
2003 | goto fail4; |
2004 | } | |
2005 | ||
2006 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | |
2007 | __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, | |
2008 | ep->l2t->idx); | |
2009 | ||
2010 | state_set(&ep->com, CONNECTING); | |
2011 | ep->tos = 0; | |
2012 | ||
2013 | /* send connect request to rnic */ | |
2014 | err = send_connect(ep); | |
2015 | if (!err) | |
2016 | goto out; | |
2017 | ||
2018 | cxgb4_l2t_release(ep->l2t); | |
2019 | fail4: | |
2020 | dst_release(ep->dst); | |
2021 | fail3: | |
2022 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | |
2023 | fail2: | |
2024 | /* | |
2025 | * Remember to send a notification to the upper layer.  We are in | |
2026 | * here because the upper layer is not aware that this is a | |
2027 | * reconnect attempt, and so is still waiting for the response to | |
2028 | * the first connect request. | |
2029 | */ | |
2030 | connect_reply_upcall(ep, -ECONNRESET); | |
2031 | c4iw_put_ep(&ep->com); | |
2032 | out: | |
2033 | return err; | |
2034 | } | |
2035 | ||
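/*
 * The peer (or the hardware on its behalf) has aborted the connection.
 * Ignore negative advice; otherwise move the QP to ERROR if needed,
 * notify the ULP, reply with CPL_ABORT_RPL (no RST) and either release
 * the endpoint or retry the connection with MPA v1.
 */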
cfdda9d7 SW |
2036 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) |
2037 | { | |
2038 | struct cpl_abort_req_rss *req = cplhdr(skb); | |
2039 | struct c4iw_ep *ep; | |
2040 | struct cpl_abort_rpl *rpl; | |
2041 | struct sk_buff *rpl_skb; | |
2042 | struct c4iw_qp_attributes attrs; | |
2043 | int ret; | |
2044 | int release = 0; | |
cfdda9d7 SW |
2045 | struct tid_info *t = dev->rdev.lldi.tids; |
2046 | unsigned int tid = GET_TID(req); | |
cfdda9d7 SW |
2047 | |
2048 | ep = lookup_tid(t, tid); | |
2049 | if (is_neg_adv_abort(req->status)) { | |
2050 | PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, | |
2051 | ep->hwtid); | |
2052 | return 0; | |
2053 | } | |
cfdda9d7 SW |
2054 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, |
2055 | ep->com.state); | |
2f5b48c3 SW |
2056 | |
2057 | /* | |
2058 | * Wake up any threads in rdma_init() or rdma_fini(). | |
d2fe99e8 KS |
2059 | * However, this is not needed if the com state is just | |
2060 | * MPA_REQ_SENT. | |
2f5b48c3 | 2061 | */ |
d2fe99e8 KS |
2062 | if (ep->com.state != MPA_REQ_SENT) |
2063 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | |
2f5b48c3 SW |
2064 | |
2065 | mutex_lock(&ep->com.mutex); | |
cfdda9d7 SW |
2066 | switch (ep->com.state) { |
2067 | case CONNECTING: | |
2068 | break; | |
2069 | case MPA_REQ_WAIT: | |
ca5a2202 | 2070 | stop_ep_timer(ep); |
cfdda9d7 SW |
2071 | break; |
2072 | case MPA_REQ_SENT: | |
ca5a2202 | 2073 | stop_ep_timer(ep); |
d2fe99e8 KS |
2074 | if (mpa_rev == 2 && ep->tried_with_mpa_v1) |
2075 | connect_reply_upcall(ep, -ECONNRESET); | |
2076 | else { | |
2077 | /* | |
2078 | * We just don't send a notification upwards because we | |
2079 | * want to retry with mpa_v1 without the upper layers even | |
2080 | * knowing it. | |
2081 | * | |
2082 | * Do some housekeeping so as to re-initiate the | |
2083 | * connection. | |
2084 | */ | |
2085 | PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__, | |
2086 | mpa_rev); | |
2087 | ep->retry_with_mpa_v1 = 1; | |
2088 | } | |
cfdda9d7 SW |
2089 | break; |
2090 | case MPA_REP_SENT: | |
cfdda9d7 SW |
2091 | break; |
2092 | case MPA_REQ_RCVD: | |
cfdda9d7 SW |
2093 | break; |
2094 | case MORIBUND: | |
2095 | case CLOSING: | |
ca5a2202 | 2096 | stop_ep_timer(ep); |
cfdda9d7 SW |
2097 | /*FALLTHROUGH*/ |
2098 | case FPDU_MODE: | |
2099 | if (ep->com.cm_id && ep->com.qp) { | |
2100 | attrs.next_state = C4IW_QP_STATE_ERROR; | |
2101 | ret = c4iw_modify_qp(ep->com.qp->rhp, | |
2102 | ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, | |
2103 | &attrs, 1); | |
2104 | if (ret) | |
2105 | printk(KERN_ERR MOD | |
2106 | "%s - qp <- error failed!\n", | |
2107 | __func__); | |
2108 | } | |
2109 | peer_abort_upcall(ep); | |
2110 | break; | |
2111 | case ABORTING: | |
2112 | break; | |
2113 | case DEAD: | |
2114 | PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); | |
2f5b48c3 | 2115 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2116 | return 0; |
2117 | default: | |
2118 | BUG_ON(1); | |
2119 | break; | |
2120 | } | |
2121 | dst_confirm(ep->dst); | |
2122 | if (ep->com.state != ABORTING) { | |
2123 | __state_set(&ep->com, DEAD); | |
d2fe99e8 KS |
2124 | /* we don't release if we want to retry with mpa_v1 */ |
2125 | if (!ep->retry_with_mpa_v1) | |
2126 | release = 1; | |
cfdda9d7 | 2127 | } |
2f5b48c3 | 2128 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2129 | |
2130 | rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); | |
2131 | if (!rpl_skb) { | |
2132 | printk(KERN_ERR MOD "%s - cannot allocate skb!\n", | |
2133 | __func__); | |
2134 | release = 1; | |
2135 | goto out; | |
2136 | } | |
2137 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
2138 | rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); | |
2139 | INIT_TP_WR(rpl, ep->hwtid); | |
2140 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); | |
2141 | rpl->cmd = CPL_ABORT_NO_RST; | |
2142 | c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); | |
2143 | out: | |
cfdda9d7 SW |
2144 | if (release) |
2145 | release_ep_resources(ep); | |
d2fe99e8 KS |
2146 | |
2147 | /* retry with mpa-v1 */ | |
2148 | if (ep && ep->retry_with_mpa_v1) { | |
2149 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); | |
2150 | dst_release(ep->dst); | |
2151 | cxgb4_l2t_release(ep->l2t); | |
2152 | c4iw_reconnect(ep); | |
2153 | } | |
2154 | ||
cfdda9d7 SW |
2155 | return 0; |
2156 | } | |
2157 | ||
2158 | static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
2159 | { | |
2160 | struct c4iw_ep *ep; | |
2161 | struct c4iw_qp_attributes attrs; | |
2162 | struct cpl_close_con_rpl *rpl = cplhdr(skb); | |
cfdda9d7 SW |
2163 | int release = 0; |
2164 | struct tid_info *t = dev->rdev.lldi.tids; | |
2165 | unsigned int tid = GET_TID(rpl); | |
cfdda9d7 SW |
2166 | |
2167 | ep = lookup_tid(t, tid); | |
2168 | ||
2169 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
2170 | BUG_ON(!ep); | |
2171 | ||
2172 | /* The cm_id may be null if we failed to connect */ | |
2f5b48c3 | 2173 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
2174 | switch (ep->com.state) { |
2175 | case CLOSING: | |
2176 | __state_set(&ep->com, MORIBUND); | |
2177 | break; | |
2178 | case MORIBUND: | |
ca5a2202 | 2179 | stop_ep_timer(ep); |
cfdda9d7 SW |
2180 | if ((ep->com.cm_id) && (ep->com.qp)) { |
2181 | attrs.next_state = C4IW_QP_STATE_IDLE; | |
2182 | c4iw_modify_qp(ep->com.qp->rhp, | |
2183 | ep->com.qp, | |
2184 | C4IW_QP_ATTR_NEXT_STATE, | |
2185 | &attrs, 1); | |
2186 | } | |
2187 | close_complete_upcall(ep); | |
2188 | __state_set(&ep->com, DEAD); | |
2189 | release = 1; | |
2190 | break; | |
2191 | case ABORTING: | |
2192 | case DEAD: | |
2193 | break; | |
2194 | default: | |
2195 | BUG_ON(1); | |
2196 | break; | |
2197 | } | |
2f5b48c3 | 2198 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2199 | if (release) |
2200 | release_ep_resources(ep); | |
2201 | return 0; | |
2202 | } | |
2203 | ||
2204 | static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) | |
2205 | { | |
0e42c1f4 | 2206 | struct cpl_rdma_terminate *rpl = cplhdr(skb); |
cfdda9d7 | 2207 | struct tid_info *t = dev->rdev.lldi.tids; |
0e42c1f4 SW |
2208 | unsigned int tid = GET_TID(rpl); |
2209 | struct c4iw_ep *ep; | |
2210 | struct c4iw_qp_attributes attrs; | |
cfdda9d7 SW |
2211 | |
2212 | ep = lookup_tid(t, tid); | |
0e42c1f4 | 2213 | BUG_ON(!ep); |
cfdda9d7 | 2214 | |
30c95c2d | 2215 | if (ep && ep->com.qp) { |
0e42c1f4 SW |
2216 | printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, |
2217 | ep->com.qp->wq.sq.qid); | |
2218 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | |
2219 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
2220 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | |
2221 | } else | |
30c95c2d | 2222 | printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); |
cfdda9d7 | 2223 | |
cfdda9d7 SW |
2224 | return 0; |
2225 | } | |
2226 | ||
2227 | /* | |
2228 | * Upcall from the adapter indicating data has been transmitted. | |
2229 | * For us it's just the single MPA request or reply. We can now free | |
2230 | * the skb holding the MPA message. | |
2231 | */ | |
2232 | static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) | |
2233 | { | |
2234 | struct c4iw_ep *ep; | |
2235 | struct cpl_fw4_ack *hdr = cplhdr(skb); | |
2236 | u8 credits = hdr->credits; | |
2237 | unsigned int tid = GET_TID(hdr); | |
2238 | struct tid_info *t = dev->rdev.lldi.tids; | |
2239 | ||
2240 | ||
2241 | ep = lookup_tid(t, tid); | |
2242 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); | |
2243 | if (credits == 0) { | |
aa1ad260 JP |
2244 | PDBG("%s 0 credit ack ep %p tid %u state %u\n", |
2245 | __func__, ep, ep->hwtid, state_read(&ep->com)); | |
cfdda9d7 SW |
2246 | return 0; |
2247 | } | |
2248 | ||
2249 | dst_confirm(ep->dst); | |
2250 | if (ep->mpa_skb) { | |
2251 | PDBG("%s last streaming msg ack ep %p tid %u state %u " | |
2252 | "initiator %u freeing skb\n", __func__, ep, ep->hwtid, | |
2253 | state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); | |
2254 | kfree_skb(ep->mpa_skb); | |
2255 | ep->mpa_skb = NULL; | |
2256 | } | |
2257 | return 0; | |
2258 | } | |
2259 | ||
cfdda9d7 SW |
2260 | int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) |
2261 | { | |
2262 | int err; | |
2263 | struct c4iw_ep *ep = to_ep(cm_id); | |
2264 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
2265 | ||
2266 | if (state_read(&ep->com) == DEAD) { | |
2267 | c4iw_put_ep(&ep->com); | |
2268 | return -ECONNRESET; | |
2269 | } | |
2270 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | |
2271 | if (mpa_rev == 0) | |
2272 | abort_connection(ep, NULL, GFP_KERNEL); | |
2273 | else { | |
2274 | err = send_mpa_reject(ep, pdata, pdata_len); | |
2275 | err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | |
2276 | } | |
2277 | c4iw_put_ep(&ep->com); | |
2278 | return 0; | |
2279 | } | |
2280 | ||
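/*
 * ULP accept of an incoming connection request.  Validate the requested
 * IRD/ORD (including the MPA v2 negotiation rules), bind the QP to the
 * endpoint, transition it to RTS and send the MPA reply; on success the
 * endpoint enters FPDU_MODE and an ESTABLISHED event is delivered.
 */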
2281 | int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
2282 | { | |
2283 | int err; | |
2284 | struct c4iw_qp_attributes attrs; | |
2285 | enum c4iw_qp_attr_mask mask; | |
2286 | struct c4iw_ep *ep = to_ep(cm_id); | |
2287 | struct c4iw_dev *h = to_c4iw_dev(cm_id->device); | |
2288 | struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); | |
2289 | ||
2290 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
2291 | if (state_read(&ep->com) == DEAD) { | |
2292 | err = -ECONNRESET; | |
2293 | goto err; | |
2294 | } | |
2295 | ||
2296 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | |
2297 | BUG_ON(!qp); | |
2298 | ||
be4c9bad RD |
2299 | if ((conn_param->ord > c4iw_max_read_depth) || |
2300 | (conn_param->ird > c4iw_max_read_depth)) { | |
cfdda9d7 SW |
2301 | abort_connection(ep, NULL, GFP_KERNEL); |
2302 | err = -EINVAL; | |
2303 | goto err; | |
2304 | } | |
2305 | ||
d2fe99e8 KS |
2306 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
2307 | if (conn_param->ord > ep->ird) { | |
2308 | ep->ird = conn_param->ird; | |
2309 | ep->ord = conn_param->ord; | |
2310 | send_mpa_reject(ep, conn_param->private_data, | |
2311 | conn_param->private_data_len); | |
2312 | abort_connection(ep, NULL, GFP_KERNEL); | |
2313 | err = -ENOMEM; | |
2314 | goto err; | |
2315 | } | |
2316 | if (conn_param->ird > ep->ord) { | |
2317 | if (!ep->ord) | |
2318 | conn_param->ird = 1; | |
2319 | else { | |
2320 | abort_connection(ep, NULL, GFP_KERNEL); | |
2321 | err = -ENOMEM; | |
2322 | goto err; | |
2323 | } | |
2324 | } | |
cfdda9d7 | 2325 | |
d2fe99e8 | 2326 | } |
cfdda9d7 SW |
2327 | ep->ird = conn_param->ird; |
2328 | ep->ord = conn_param->ord; | |
2329 | ||
d2fe99e8 KS |
2330 | if (ep->mpa_attr.version != 2) |
2331 | if (peer2peer && ep->ird == 0) | |
2332 | ep->ird = 1; | |
cfdda9d7 SW |
2333 | |
2334 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); | |
2335 | ||
d2fe99e8 KS |
2336 | cm_id->add_ref(cm_id); |
2337 | ep->com.cm_id = cm_id; | |
2338 | ep->com.qp = qp; | |
2339 | ||
cfdda9d7 SW |
2340 | /* bind QP to EP and move to RTS */ |
2341 | attrs.mpa_attr = ep->mpa_attr; | |
2342 | attrs.max_ird = ep->ird; | |
2343 | attrs.max_ord = ep->ord; | |
2344 | attrs.llp_stream_handle = ep; | |
2345 | attrs.next_state = C4IW_QP_STATE_RTS; | |
2346 | ||
2347 | /* bind QP and TID with INIT_WR */ | |
2348 | mask = C4IW_QP_ATTR_NEXT_STATE | | |
2349 | C4IW_QP_ATTR_LLP_STREAM_HANDLE | | |
2350 | C4IW_QP_ATTR_MPA_ATTR | | |
2351 | C4IW_QP_ATTR_MAX_IRD | | |
2352 | C4IW_QP_ATTR_MAX_ORD; | |
2353 | ||
2354 | err = c4iw_modify_qp(ep->com.qp->rhp, | |
2355 | ep->com.qp, mask, &attrs, 1); | |
2356 | if (err) | |
2357 | goto err1; | |
2358 | err = send_mpa_reply(ep, conn_param->private_data, | |
2359 | conn_param->private_data_len); | |
2360 | if (err) | |
2361 | goto err1; | |
2362 | ||
2363 | state_set(&ep->com, FPDU_MODE); | |
2364 | established_upcall(ep); | |
2365 | c4iw_put_ep(&ep->com); | |
2366 | return 0; | |
2367 | err1: | |
2368 | ep->com.cm_id = NULL; | |
2369 | ep->com.qp = NULL; | |
2370 | cm_id->rem_ref(cm_id); | |
2371 | err: | |
2372 | c4iw_put_ep(&ep->com); | |
2373 | return err; | |
2374 | } | |
2375 | ||
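/*
 * ULP active connect.  Validate the requested IRD/ORD, allocate an
 * endpoint and an atid, resolve the route and L2T information for the
 * destination, then send the connect request to the adapter.  Failures
 * unwind in reverse order through the fail2/fail3/fail4 labels.
 */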
2376 | int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
2377 | { | |
cfdda9d7 SW |
2378 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); |
2379 | struct c4iw_ep *ep; | |
2380 | struct rtable *rt; | |
3786cf18 | 2381 | int err = 0; |
cfdda9d7 | 2382 | |
be4c9bad RD |
2383 | if ((conn_param->ord > c4iw_max_read_depth) || |
2384 | (conn_param->ird > c4iw_max_read_depth)) { | |
2385 | err = -EINVAL; | |
2386 | goto out; | |
2387 | } | |
cfdda9d7 SW |
2388 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); |
2389 | if (!ep) { | |
2390 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); | |
2391 | err = -ENOMEM; | |
2392 | goto out; | |
2393 | } | |
2394 | init_timer(&ep->timer); | |
2395 | ep->plen = conn_param->private_data_len; | |
2396 | if (ep->plen) | |
2397 | memcpy(ep->mpa_pkt + sizeof(struct mpa_message), | |
2398 | conn_param->private_data, ep->plen); | |
2399 | ep->ird = conn_param->ird; | |
2400 | ep->ord = conn_param->ord; | |
2401 | ||
2402 | if (peer2peer && ep->ord == 0) | |
2403 | ep->ord = 1; | |
2404 | ||
2405 | cm_id->add_ref(cm_id); | |
2406 | ep->com.dev = dev; | |
2407 | ep->com.cm_id = cm_id; | |
2408 | ep->com.qp = get_qhp(dev, conn_param->qpn); | |
2409 | BUG_ON(!ep->com.qp); | |
2410 | PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, | |
2411 | ep->com.qp, cm_id); | |
2412 | ||
2413 | /* | |
2414 | * Allocate an active TID to initiate a TCP connection. | |
2415 | */ | |
2416 | ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); | |
2417 | if (ep->atid == -1) { | |
2418 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | |
2419 | err = -ENOMEM; | |
2420 | goto fail2; | |
2421 | } | |
2422 | ||
2423 | PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, | |
2424 | ntohl(cm_id->local_addr.sin_addr.s_addr), | |
2425 | ntohs(cm_id->local_addr.sin_port), | |
2426 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | |
2427 | ntohs(cm_id->remote_addr.sin_port)); | |
2428 | ||
2429 | /* find a route */ | |
2430 | rt = find_route(dev, | |
2431 | cm_id->local_addr.sin_addr.s_addr, | |
2432 | cm_id->remote_addr.sin_addr.s_addr, | |
2433 | cm_id->local_addr.sin_port, | |
2434 | cm_id->remote_addr.sin_port, 0); | |
2435 | if (!rt) { | |
2436 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | |
2437 | err = -EHOSTUNREACH; | |
2438 | goto fail3; | |
2439 | } | |
d8d1f30b | 2440 | ep->dst = &rt->dst; |
cfdda9d7 | 2441 | |
3786cf18 DM |
2442 | err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr, |
2443 | ep->dst, ep->com.dev, true); | |
2444 | if (err) { | |
cfdda9d7 | 2445 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
cfdda9d7 SW |
2446 | goto fail4; |
2447 | } | |
2448 | ||
2449 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | |
2450 | __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, | |
2451 | ep->l2t->idx); | |
2452 | ||
2453 | state_set(&ep->com, CONNECTING); | |
2454 | ep->tos = 0; | |
2455 | ep->com.local_addr = cm_id->local_addr; | |
2456 | ep->com.remote_addr = cm_id->remote_addr; | |
2457 | ||
2458 | /* send connect request to rnic */ | |
2459 | err = send_connect(ep); | |
2460 | if (!err) | |
2461 | goto out; | |
2462 | ||
2463 | cxgb4_l2t_release(ep->l2t); | |
2464 | fail4: | |
2465 | dst_release(ep->dst); | |
2466 | fail3: | |
2467 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | |
2468 | fail2: | |
2469 | cm_id->rem_ref(cm_id); | |
2470 | c4iw_put_ep(&ep->com); | |
2471 | out: | |
2472 | return err; | |
2473 | } | |
2474 | ||
2475 | int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |
2476 | { | |
2477 | int err = 0; | |
2478 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); | |
2479 | struct c4iw_listen_ep *ep; | |
2480 | ||
2481 | ||
2482 | might_sleep(); | |
2483 | ||
2484 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); | |
2485 | if (!ep) { | |
2486 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); | |
2487 | err = -ENOMEM; | |
2488 | goto fail1; | |
2489 | } | |
2490 | PDBG("%s ep %p\n", __func__, ep); | |
2491 | cm_id->add_ref(cm_id); | |
2492 | ep->com.cm_id = cm_id; | |
2493 | ep->com.dev = dev; | |
2494 | ep->backlog = backlog; | |
2495 | ep->com.local_addr = cm_id->local_addr; | |
2496 | ||
2497 | /* | |
2498 | * Allocate a server TID. | |
2499 | */ | |
2500 | ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); | |
2501 | if (ep->stid == -1) { | |
be4c9bad | 2502 | printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); |
cfdda9d7 SW |
2503 | err = -ENOMEM; |
2504 | goto fail2; | |
2505 | } | |
2506 | ||
2507 | state_set(&ep->com, LISTEN); | |
aadc4df3 | 2508 | c4iw_init_wr_wait(&ep->com.wr_wait); |
cfdda9d7 SW |
2509 | err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, |
2510 | ep->com.local_addr.sin_addr.s_addr, | |
2511 | ep->com.local_addr.sin_port, | |
2512 | ep->com.dev->rdev.lldi.rxq_ids[0]); | |
2513 | if (err) | |
2514 | goto fail3; | |
2515 | ||
2516 | /* wait for pass_open_rpl */ | |
aadc4df3 SW |
2517 | err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, |
2518 | __func__); | |
cfdda9d7 SW |
2519 | if (!err) { |
2520 | cm_id->provider_data = ep; | |
2521 | goto out; | |
2522 | } | |
2523 | fail3: | |
2524 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); | |
2525 | fail2: | |
2526 | cm_id->rem_ref(cm_id); | |
2527 | c4iw_put_ep(&ep->com); | |
2528 | fail1: | |
2529 | out: | |
2530 | return err; | |
2531 | } | |
2532 | ||
2533 | int c4iw_destroy_listen(struct iw_cm_id *cm_id) | |
2534 | { | |
2535 | int err; | |
2536 | struct c4iw_listen_ep *ep = to_listen_ep(cm_id); | |
2537 | ||
2538 | PDBG("%s ep %p\n", __func__, ep); | |
2539 | ||
2540 | might_sleep(); | |
2541 | state_set(&ep->com, DEAD); | |
aadc4df3 | 2542 | c4iw_init_wr_wait(&ep->com.wr_wait); |
cfdda9d7 SW |
2543 | err = listen_stop(ep); |
2544 | if (err) | |
2545 | goto done; | |
aadc4df3 SW |
2546 | err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, |
2547 | __func__); | |
cfdda9d7 SW |
2548 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); |
2549 | done: | |
cfdda9d7 SW |
2550 | cm_id->rem_ref(cm_id); |
2551 | c4iw_put_ep(&ep->com); | |
2552 | return err; | |
2553 | } | |
2554 | ||
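/*
 * Start an orderly (half-close) or abortive shutdown of the endpoint,
 * driven by the current connection state.  A fatal adapter error, or a
 * failure to send the close/abort message, causes the endpoint
 * resources to be released immediately.
 */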
2555 | int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | |
2556 | { | |
2557 | int ret = 0; | |
cfdda9d7 SW |
2558 | int close = 0; |
2559 | int fatal = 0; | |
2560 | struct c4iw_rdev *rdev; | |
cfdda9d7 | 2561 | |
2f5b48c3 | 2562 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
2563 | |
2564 | PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, | |
2565 | states[ep->com.state], abrupt); | |
2566 | ||
2567 | rdev = &ep->com.dev->rdev; | |
2568 | if (c4iw_fatal_error(rdev)) { | |
2569 | fatal = 1; | |
2570 | close_complete_upcall(ep); | |
2571 | ep->com.state = DEAD; | |
2572 | } | |
2573 | switch (ep->com.state) { | |
2574 | case MPA_REQ_WAIT: | |
2575 | case MPA_REQ_SENT: | |
2576 | case MPA_REQ_RCVD: | |
2577 | case MPA_REP_SENT: | |
2578 | case FPDU_MODE: | |
2579 | close = 1; | |
2580 | if (abrupt) | |
2581 | ep->com.state = ABORTING; | |
2582 | else { | |
2583 | ep->com.state = CLOSING; | |
ca5a2202 | 2584 | start_ep_timer(ep); |
cfdda9d7 SW |
2585 | } |
2586 | set_bit(CLOSE_SENT, &ep->com.flags); | |
2587 | break; | |
2588 | case CLOSING: | |
2589 | if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { | |
2590 | close = 1; | |
2591 | if (abrupt) { | |
ca5a2202 | 2592 | stop_ep_timer(ep); |
cfdda9d7 SW |
2593 | ep->com.state = ABORTING; |
2594 | } else | |
2595 | ep->com.state = MORIBUND; | |
2596 | } | |
2597 | break; | |
2598 | case MORIBUND: | |
2599 | case ABORTING: | |
2600 | case DEAD: | |
2601 | PDBG("%s ignoring disconnect ep %p state %u\n", | |
2602 | __func__, ep, ep->com.state); | |
2603 | break; | |
2604 | default: | |
2605 | BUG(); | |
2606 | break; | |
2607 | } | |
2608 | ||
cfdda9d7 | 2609 | if (close) { |
8da7e7a5 SW |
2610 | if (abrupt) { |
2611 | close_complete_upcall(ep); | |
2612 | ret = send_abort(ep, NULL, gfp); | |
2613 | } else | |
cfdda9d7 SW |
2614 | ret = send_halfclose(ep, gfp); |
2615 | if (ret) | |
2616 | fatal = 1; | |
2617 | } | |
8da7e7a5 | 2618 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2619 | if (fatal) |
2620 | release_ep_resources(ep); | |
2621 | return ret; | |
2622 | } | |
2623 | ||
2f5b48c3 SW |
2624 | static int async_event(struct c4iw_dev *dev, struct sk_buff *skb) |
2625 | { | |
2626 | struct cpl_fw6_msg *rpl = cplhdr(skb); | |
2627 | c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); | |
2628 | return 0; | |
2629 | } | |
2630 | ||
be4c9bad RD |
2631 | /* |
2632 | * These are the real handlers that are called from a | |
2633 | * work queue. | |
2634 | */ | |
2635 | static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { | |
2636 | [CPL_ACT_ESTABLISH] = act_establish, | |
2637 | [CPL_ACT_OPEN_RPL] = act_open_rpl, | |
2638 | [CPL_RX_DATA] = rx_data, | |
2639 | [CPL_ABORT_RPL_RSS] = abort_rpl, | |
2640 | [CPL_ABORT_RPL] = abort_rpl, | |
2641 | [CPL_PASS_OPEN_RPL] = pass_open_rpl, | |
2642 | [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, | |
2643 | [CPL_PASS_ACCEPT_REQ] = pass_accept_req, | |
2644 | [CPL_PASS_ESTABLISH] = pass_establish, | |
2645 | [CPL_PEER_CLOSE] = peer_close, | |
2646 | [CPL_ABORT_REQ_RSS] = peer_abort, | |
2647 | [CPL_CLOSE_CON_RPL] = close_con_rpl, | |
2648 | [CPL_RDMA_TERMINATE] = terminate, | |
2f5b48c3 SW |
2649 | [CPL_FW4_ACK] = fw4_ack, |
2650 | [CPL_FW6_MSG] = async_event | |
be4c9bad RD |
2651 | }; |
2652 | ||
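/*
 * Connection setup/teardown timer expiry.  ep_timeout() runs in timer
 * context and only queues the endpoint on timeout_list; process_timeout()
 * later runs from the workqueue, aborts the connection as appropriate
 * and drops the endpoint reference held for the timer.
 */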
2653 | static void process_timeout(struct c4iw_ep *ep) | |
2654 | { | |
2655 | struct c4iw_qp_attributes attrs; | |
2656 | int abort = 1; | |
2657 | ||
2f5b48c3 | 2658 | mutex_lock(&ep->com.mutex); |
be4c9bad RD |
2659 | PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, |
2660 | ep->com.state); | |
2661 | switch (ep->com.state) { | |
2662 | case MPA_REQ_SENT: | |
2663 | __state_set(&ep->com, ABORTING); | |
2664 | connect_reply_upcall(ep, -ETIMEDOUT); | |
2665 | break; | |
2666 | case MPA_REQ_WAIT: | |
2667 | __state_set(&ep->com, ABORTING); | |
2668 | break; | |
2669 | case CLOSING: | |
2670 | case MORIBUND: | |
2671 | if (ep->com.cm_id && ep->com.qp) { | |
2672 | attrs.next_state = C4IW_QP_STATE_ERROR; | |
2673 | c4iw_modify_qp(ep->com.qp->rhp, | |
2674 | ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, | |
2675 | &attrs, 1); | |
2676 | } | |
2677 | __state_set(&ep->com, ABORTING); | |
2678 | break; | |
2679 | default: | |
76f267b7 | 2680 | WARN(1, "%s unexpected state ep %p tid %u state %u\n", |
be4c9bad | 2681 | __func__, ep, ep->hwtid, ep->com.state); |
be4c9bad RD |
2682 | abort = 0; |
2683 | } | |
2f5b48c3 | 2684 | mutex_unlock(&ep->com.mutex); |
be4c9bad RD |
2685 | if (abort) |
2686 | abort_connection(ep, NULL, GFP_KERNEL); | |
2687 | c4iw_put_ep(&ep->com); | |
2688 | } | |
2689 | ||
2690 | static void process_timedout_eps(void) | |
2691 | { | |
2692 | struct c4iw_ep *ep; | |
2693 | ||
2694 | spin_lock_irq(&timeout_lock); | |
2695 | while (!list_empty(&timeout_list)) { | |
2696 | struct list_head *tmp; | |
2697 | ||
2698 | tmp = timeout_list.next; | |
2699 | list_del(tmp); | |
2700 | spin_unlock_irq(&timeout_lock); | |
2701 | ep = list_entry(tmp, struct c4iw_ep, entry); | |
2702 | process_timeout(ep); | |
2703 | spin_lock_irq(&timeout_lock); | |
2704 | } | |
2705 | spin_unlock_irq(&timeout_lock); | |
2706 | } | |
2707 | ||
2708 | static void process_work(struct work_struct *work) | |
2709 | { | |
2710 | struct sk_buff *skb = NULL; | |
2711 | struct c4iw_dev *dev; | |
c1d7356c | 2712 | struct cpl_act_establish *rpl; |
be4c9bad RD |
2713 | unsigned int opcode; |
2714 | int ret; | |
2715 | ||
2716 | while ((skb = skb_dequeue(&rxq))) { | |
2717 | rpl = cplhdr(skb); | |
2718 | dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); | |
2719 | opcode = rpl->ot.opcode; | |
2720 | ||
2721 | BUG_ON(!work_handlers[opcode]); | |
2722 | ret = work_handlers[opcode](dev, skb); | |
2723 | if (!ret) | |
2724 | kfree_skb(skb); | |
2725 | } | |
2726 | process_timedout_eps(); | |
2727 | } | |
2728 | ||
2729 | static DECLARE_WORK(skb_work, process_work); | |
2730 | ||
2731 | static void ep_timeout(unsigned long arg) | |
2732 | { | |
2733 | struct c4iw_ep *ep = (struct c4iw_ep *)arg; | |
2734 | ||
2735 | spin_lock(&timeout_lock); | |
2736 | list_add_tail(&ep->entry, &timeout_list); | |
2737 | spin_unlock(&timeout_lock); | |
2738 | queue_work(workq, &skb_work); | |
2739 | } | |
2740 | ||
cfdda9d7 SW |
2741 | /* |
2742 | * All the CM events are handled on a work queue to have a safe context. | |
2743 | */ | |
2744 | static int sched(struct c4iw_dev *dev, struct sk_buff *skb) | |
2745 | { | |
2746 | ||
2747 | /* | |
2748 | * Save dev in the skb->cb area. | |
2749 | */ | |
2750 | *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; | |
2751 | ||
2752 | /* | |
2753 | * Queue the skb and schedule the worker thread. | |
2754 | */ | |
2755 | skb_queue_tail(&rxq, skb); | |
2756 | queue_work(workq, &skb_work); | |
2757 | return 0; | |
2758 | } | |
2759 | ||
2760 | static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
2761 | { | |
2762 | struct cpl_set_tcb_rpl *rpl = cplhdr(skb); | |
2763 | ||
2764 | if (rpl->status != CPL_ERR_NONE) { | |
2765 | printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " | |
2766 | "for tid %u\n", rpl->status, GET_TID(rpl)); | |
2767 | } | |
2f5b48c3 | 2768 | kfree_skb(skb); |
cfdda9d7 SW |
2769 | return 0; |
2770 | } | |
2771 | ||
be4c9bad RD |
2772 | static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) |
2773 | { | |
2774 | struct cpl_fw6_msg *rpl = cplhdr(skb); | |
2775 | struct c4iw_wr_wait *wr_waitp; | |
2776 | int ret; | |
5be78ee9 VP |
2777 | u8 opcode; |
2778 | struct cpl_fw6_msg_ofld_connection_wr_rpl *req; | |
2779 | struct c4iw_ep *ep; | |
be4c9bad RD |
2780 | |
2781 | PDBG("%s type %u\n", __func__, rpl->type); | |
2782 | ||
2783 | switch (rpl->type) { | |
5be78ee9 | 2784 | case FW6_TYPE_WR_RPL: |
be4c9bad | 2785 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); |
c8e081a1 | 2786 | wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; |
be4c9bad | 2787 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); |
d9594d99 SW |
2788 | if (wr_waitp) |
2789 | c4iw_wake_up(wr_waitp, ret ? -ret : 0); | |
2f5b48c3 | 2790 | kfree_skb(skb); |
be4c9bad | 2791 | break; |
5be78ee9 | 2792 | case FW6_TYPE_CQE: |
2f5b48c3 | 2793 | sched(dev, skb); |
be4c9bad | 2794 | break; |
5be78ee9 VP |
2795 | case FW6_TYPE_OFLD_CONNECTION_WR_RPL: |
2796 | opcode = *(const u8 *)rpl->data; | |
2797 | if (opcode == FW_OFLD_CONNECTION_WR) { | |
2798 | req = | |
2799 | (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; | |
2800 | if (req->t_state == TCP_SYN_SENT | |
2801 | && (req->retval == FW_ENOMEM | |
2802 | || req->retval == FW_EADDRINUSE)) { | |
2803 | ep = (struct c4iw_ep *) | |
2804 | lookup_atid(dev->rdev.lldi.tids, | |
2805 | req->tid); | |
2806 | c4iw_l2t_send(&dev->rdev, skb, ep->l2t); | |
2807 | return 0; | |
2808 | } | |
2809 | } | |
2810 | break; | |
be4c9bad RD |
2811 | default: |
2812 | printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, | |
2813 | rpl->type); | |
2f5b48c3 | 2814 | kfree_skb(skb); |
be4c9bad RD |
2815 | break; |
2816 | } | |
2817 | return 0; | |
2818 | } | |
2819 | ||
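/*
 * CPL_ABORT_REQ_RSS handler that runs directly in the LLD's receive
 * context: wake up any thread blocked in rdma_init()/rdma_fini()
 * immediately, then hand the message to sched() so peer_abort() can do
 * the real work from the workqueue.
 */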
8da7e7a5 SW |
2820 | static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) |
2821 | { | |
2822 | struct cpl_abort_req_rss *req = cplhdr(skb); | |
2823 | struct c4iw_ep *ep; | |
2824 | struct tid_info *t = dev->rdev.lldi.tids; | |
2825 | unsigned int tid = GET_TID(req); | |
2826 | ||
2827 | ep = lookup_tid(t, tid); | |
14b92228 SW |
2828 | if (!ep) { |
2829 | printk(KERN_WARNING MOD | |
2830 | "Abort on non-existent endpoint, tid %d\n", tid); | |
2831 | kfree_skb(skb); | |
2832 | return 0; | |
2833 | } | |
8da7e7a5 SW |
2834 | if (is_neg_adv_abort(req->status)) { |
2835 | PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, | |
2836 | ep->hwtid); | |
2837 | kfree_skb(skb); | |
2838 | return 0; | |
2839 | } | |
2840 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, | |
2841 | ep->com.state); | |
2842 | ||
2843 | /* | |
2844 | * Wake up any threads in rdma_init() or rdma_fini(). | |
2845 | */ | |
0f1dcfae | 2846 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
8da7e7a5 SW |
2847 | sched(dev, skb); |
2848 | return 0; | |
2849 | } | |
2850 | ||
be4c9bad RD |
2851 | /* |
2852 | * Most upcalls from the T4 Core go to sched() to | |
2853 | * schedule the processing on a work queue. | |
2854 | */ | |
2855 | c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { | |
2856 | [CPL_ACT_ESTABLISH] = sched, | |
2857 | [CPL_ACT_OPEN_RPL] = sched, | |
2858 | [CPL_RX_DATA] = sched, | |
2859 | [CPL_ABORT_RPL_RSS] = sched, | |
2860 | [CPL_ABORT_RPL] = sched, | |
2861 | [CPL_PASS_OPEN_RPL] = sched, | |
2862 | [CPL_CLOSE_LISTSRV_RPL] = sched, | |
2863 | [CPL_PASS_ACCEPT_REQ] = sched, | |
2864 | [CPL_PASS_ESTABLISH] = sched, | |
2865 | [CPL_PEER_CLOSE] = sched, | |
2866 | [CPL_CLOSE_CON_RPL] = sched, | |
8da7e7a5 | 2867 | [CPL_ABORT_REQ_RSS] = peer_abort_intr, |
be4c9bad RD |
2868 | [CPL_RDMA_TERMINATE] = sched, |
2869 | [CPL_FW4_ACK] = sched, | |
2870 | [CPL_SET_TCB_RPL] = set_tcb_rpl, | |
2871 | [CPL_FW6_MSG] = fw6_msg | |
2872 | }; | |
2873 | ||
cfdda9d7 SW |
2874 | int __init c4iw_cm_init(void) |
2875 | { | |
be4c9bad | 2876 | spin_lock_init(&timeout_lock); |
cfdda9d7 SW |
2877 | skb_queue_head_init(&rxq); |
2878 | ||
2879 | workq = create_singlethread_workqueue("iw_cxgb4"); | |
2880 | if (!workq) | |
2881 | return -ENOMEM; | |
2882 | ||
cfdda9d7 SW |
2883 | return 0; |
2884 | } | |
2885 | ||
2886 | void __exit c4iw_cm_term(void) | |
2887 | { | |
be4c9bad | 2888 | WARN_ON(!list_empty(&timeout_list)); |
cfdda9d7 SW |
2889 | flush_workqueue(workq); |
2890 | destroy_workqueue(workq); | |
2891 | } |