/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);

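/*
 * Arm (or re-arm) the endpoint timer.  The endpoint holds an extra
 * reference while its timer is pending; ep_timeout() or stop_ep_timer()
 * drops it.
 */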
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! ep %p state %u\n",
		       __func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

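/*
 * Send wrappers: if the rdev has hit a fatal error, free the skb and
 * return -EIO instead of touching the hardware.
 */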
int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = l2t_send(tdev, skb, l2e);
	if (error)
		kfree_skb(skb);
	return error;
}

int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = cxgb3_ofld_send(tdev, skb);
	if (error)
		kfree_skb(skb);
	return error;
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
	return;
}

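/*
 * Pause/resume receive on a TID by toggling the RX_QUIESCE bit in its TCB.
 */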
int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

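/*
 * Derive the effective MSS from the negotiated MTU index: subtract 40
 * bytes of IP+TCP headers, 12 more if timestamps are in use, and clamp
 * to a 128-byte floor.
 */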
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

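/*
 * Endpoint state accessors: __state_set() expects the caller to hold
 * epc->lock; state_read() and state_set() take the lock themselves.
 */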
static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep *ep;
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
}

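/*
 * Workqueue handler: drain the skbs queued by sched() and run the
 * matching work_handlers[] entry for each CPL message.
 */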
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

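/*
 * Map CPL_ERR_* status codes from the hardware onto negative errno values.
 */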
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

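/*
 * Return the index of the largest MTU table entry that does not exceed
 * the given path MTU.
 */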
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __func__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __func__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

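/*
 * Return RX credits to the hardware so the TCP receive window reopens;
 * returns the number of credits granted (0 if the skb allocation fails).
 */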
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}

static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
		iwch_post_zb_read(ep->com.qp);
	}

	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

	if (credits == 0) {
		PDBG(KERN_ERR "%s 0 credit ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		return CPL_RET_BUF_DONE;
	}

	BUG_ON(credits != 1);
	dst_confirm(ep->dst);
	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
			     __func__, ep, state_read(&ep->com));
			if (peer2peer)
				iwch_post_zb_read(ep->com.qp);
		} else {
			PDBG("%s responder ep %p state %u\n",
			     __func__, ep, state_read(&ep->com));
			ep->com.rpl_done = 1;
			wake_up(&ep->com.waitq);
		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
		     __func__, ep, state_read(&ep->com));
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/*
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

b038ced7
SW
1194static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1195{
1196 struct iwch_ep *ep = ctx;
1197 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1198
33718363 1199 PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
b038ced7
SW
1200 status2errno(rpl->status));
1201 connect_reply_upcall(ep, status2errno(rpl->status));
1202 state_set(&ep->com, DEAD);
8176d297 1203 if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
b038ced7
SW
1204 release_tid(ep->com.tdev, GET_TID(rpl), NULL);
1205 cxgb3_free_atid(ep->com.tdev, ep->atid);
1206 dst_release(ep->dst);
1207 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
1208 put_ep(&ep->com);
1209 return CPL_RET_BUF_DONE;
1210}
1211
1212static int listen_start(struct iwch_listen_ep *ep)
1213{
1214 struct sk_buff *skb;
1215 struct cpl_pass_open_req *req;
1216
33718363 1217 PDBG("%s ep %p\n", __func__, ep);
b038ced7
SW
1218 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1219 if (!skb) {
1220 printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
1221 return -ENOMEM;
1222 }
1223
1224 req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
1225 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1226 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
1227 req->local_port = ep->com.local_addr.sin_port;
1228 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
1229 req->peer_port = 0;
1230 req->peer_ip = 0;
1231 req->peer_netmask = 0;
1232 req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
1233 req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
1234 req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
1235
1236 skb->priority = 1;
04b5d028 1237 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
b038ced7
SW
1238}
1239
1240static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1241{
1242 struct iwch_listen_ep *ep = ctx;
1243 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1244
33718363 1245 PDBG("%s ep %p status %d error %d\n", __func__, ep,
b038ced7
SW
1246 rpl->status, status2errno(rpl->status));
1247 ep->com.rpl_err = status2errno(rpl->status);
1248 ep->com.rpl_done = 1;
1249 wake_up(&ep->com.waitq);
1250
1251 return CPL_RET_BUF_DONE;
1252}
1253
1254static int listen_stop(struct iwch_listen_ep *ep)
1255{
1256 struct sk_buff *skb;
1257 struct cpl_close_listserv_req *req;
1258
33718363 1259 PDBG("%s ep %p\n", __func__, ep);
b038ced7
SW
1260 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1261 if (!skb) {
33718363 1262 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
b038ced7
SW
1263 return -ENOMEM;
1264 }
1265 req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
1266 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
60be4b59 1267 req->cpu_idx = 0;
b038ced7
SW
1268 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
1269 skb->priority = 1;
04b5d028 1270 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
b038ced7
SW
1271}
1272
1273static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
1274 void *ctx)
1275{
1276 struct iwch_listen_ep *ep = ctx;
1277 struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
1278
33718363 1279 PDBG("%s ep %p\n", __func__, ep);
b038ced7
SW
1280 ep->com.rpl_err = status2errno(rpl->status);
1281 ep->com.rpl_done = 1;
1282 wake_up(&ep->com.waitq);
1283 return CPL_RET_BUF_DONE;
1284}
1285
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type != T3A)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR "%s bad dst mac %pM\n",
		       __func__, req->dst_mac);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

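/*
 * The peer has closed its half of the connection: advance the state
 * machine, notify the ULP, and disconnect or release the endpoint as
 * required.
 */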
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	if (state_read(&ep->com) != FPDU_MODE)
		return CPL_RET_BUF_DONE;

	PDBG("%s ep %p\n", __func__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __func__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

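/*
 * Endpoint timer callback: if MPA negotiation or connection teardown has
 * not completed within ep_timeout_secs, move the endpoint to ABORTING and
 * abort the connection.
 */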
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int abort = 1;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p state %u\n",
		       __func__, ep, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (abort)
		abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}

1785int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1786{
1787 int err;
1788 struct iwch_ep *ep = to_ep(cm_id);
33718363 1789 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
b038ced7
SW
1790
1791 if (state_read(&ep->com) == DEAD) {
1792 put_ep(&ep->com);
1793 return -ECONNRESET;
1794 }
1795 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
b038ced7
SW
1796 if (mpa_rev == 0)
1797 abort_connection(ep, NULL, GFP_KERNEL);
1798 else {
1799 err = send_mpa_reject(ep, pdata, pdata_len);
7d526e6b 1800 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
b038ced7 1801 }
6e47fe43 1802 put_ep(&ep->com);
b038ced7
SW
1803 return 0;
1804}

int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
			goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	put_ep(&ep->com);
	return err;
}

static int is_loopback_dst(struct iw_cm_id *cm_id)
{
	struct net_device *dev;

	dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
	if (!dev)
		return 0;
	dev_put(dev);
	return 1;
}
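/*
 * Example: if cm_id->remote_addr is an address assigned to a local
 * interface, ip_dev_find() returns that device, is_loopback_dst()
 * returns 1, and iwch_connect() below fails the request with -ENOSYS,
 * since loopback connections are not supported by this driver.
 */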

int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	if (is_loopback_dst(cm_id)) {
		err = -ENOSYS;
		goto out;
	}

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
out:
	return err;
}

int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}
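/*
 * The listen setup above is asynchronous: listen_start() (defined
 * earlier in this file) issues the passive-open request toward the
 * hardware, and the CPL_PASS_OPEN_RPL work handler registered in
 * iwch_cm_init() below is expected to record the result in
 * ep->com.rpl_err, set ep->com.rpl_done and wake ep->com.waitq, which
 * is what the wait_event() above blocks on.
 */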

int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct t3cdev *tdev;
	struct cxio_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	tdev = (struct t3cdev *)ep->com.tdev;
	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
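/*
 * For illustration only: the work-queue side (defined earlier in this
 * file) is expected to undo the packing above roughly along these
 * lines; `opcode` stands for the CPL opcode recovered from the skb and
 * is shown here just to indicate the dispatch through work_handlers[]:
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL) {
 *		void *ep = *((void **)skb->cb);
 *		struct t3cdev *tdev =
 *			*((struct t3cdev **)(skb->cb + sizeof(void *)));
 *		work_handlers[opcode](tdev, skb, ep);
 *	}
 */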

static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}