/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
static struct workqueue_struct *iwcm_wq;

struct iwcm_work {
        struct work_struct      work;
        struct iwcm_id_private  *cm_id;
        struct list_head        list;
        struct iw_cm_event      event;
        struct list_head        free_list;
};
/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *      LISTENING IDS:  Get enough elements preallocated to handle the
 *                      listen backlog.
 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
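/*
 * In short (a sketch of the pool lifecycle, using only the helpers defined
 * below): alloc_work_entries() fills the per-cm_id free list at listen and
 * connect time and when a passive cm_id is created, get_work() pulls an
 * element in the event upcall, and put_work() returns it to the free list
 * from the workqueue handler once the event has been processed.
 */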
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        if (list_empty(&cm_id_priv->work_free_list))
                return NULL;
        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
                          free_list);
        list_del_init(&work->free_list);
        return work;
}
static void put_work(struct iwcm_work *work)
{
        list_add(&work->free_list, &work->cm_id->work_free_list);
}
static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
        struct list_head *e, *tmp;

        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
                kfree(list_entry(e, struct iwcm_work, free_list));
}
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
        struct iwcm_work *work;

        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
        while (count--) {
                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
                if (!work) {
                        dealloc_work_entries(cm_id_priv);
                        return -ENOMEM;
                }
                work->cm_id = cm_id_priv;
                INIT_LIST_HEAD(&work->list);
                put_work(work);
        }
        return 0;
}
/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to.  Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
        void *p;

        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        event->private_data = p;
        return 0;
}
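/*
 * Note: the copy made here is owned by the IWCM.  It is released with
 * kfree(iw_event->private_data) in cm_conn_req_handler() and
 * cm_conn_rep_handler() after the client callback has run.
 */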
static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
        dealloc_work_entries(cm_id_priv);
        kfree(cm_id_priv);
}
/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
        BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                complete(&cm_id_priv->destroy_comp);
                return 1;
        }

        return 0;
}
static void add_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        atomic_inc(&cm_id_priv->refcount);
}
static void rem_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        int cb_destroy;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        /*
         * Test bit before deref in case the cm_id gets freed on another
         * thread.
         */
        cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
        if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
        }
}
static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
                                 iw_cm_handler cm_handler,
                                 void *context)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->state = IW_CM_STATE_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.event_handler = cm_event_handler;
        cm_id_priv->id.add_ref = add_ref;
        cm_id_priv->id.rem_ref = rem_ref;
        spin_lock_init(&cm_id_priv->lock);
        atomic_set(&cm_id_priv->refcount, 1);
        init_waitqueue_head(&cm_id_priv->connect_wait);
        init_completion(&cm_id_priv->destroy_comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->work_free_list);

        return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
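/*
 * Usage sketch (illustrative; my_cm_handler, dev and ctx are placeholder
 * names chosen by the consumer).  Returning non-zero from the handler asks
 * the IWCM to destroy the cm_id (see cm_work_handler below):
 *
 *      static int my_cm_handler(struct iw_cm_id *id,
 *                               struct iw_cm_event *event)
 *      {
 *              return 0;
 *      }
 *
 *      struct iw_cm_id *id = iw_create_cm_id(dev, my_cm_handler, ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 */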
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        if (!qp)
                return -EINVAL;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
/*
 * This is really the RDMAC CLOSING state.  It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        BUG_ON(qp == NULL);
        qp_attr.qp_state = IB_QPS_SQD;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;
        struct ib_qp *qp = NULL;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /* Wait if we're currently in a connect or accept downcall */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_CLOSING;
                /* QP could be NULL for user-mode client */
                if (cm_id_priv->qp)
                        qp = cm_id_priv->qp;
                else
                        ret = -EINVAL;
                break;
        case IW_CM_STATE_LISTEN:
                ret = -EINVAL;
                break;
        case IW_CM_STATE_CLOSING:
                /* remote peer closed first */
        case IW_CM_STATE_IDLE:
                /* accept or connect returned !0 */
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called disconnect before/without calling accept after
                 * connect_request event delivered.
                 */
                break;
        case IW_CM_STATE_CONN_SENT:
                /* Can only get here if wait above fails */
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (qp) {
                if (abrupt)
                        ret = iwcm_modify_qp_err(qp);
                else
                        ret = iwcm_modify_qp_sqd(qp);
                /*
                 * If both sides are disconnecting the QP could
                 * already be in ERR or SQD states
                 */
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
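/*
 * Illustrative note: the abrupt flag selects which of the two QP
 * transitions above is used, e.g.
 *
 *      iw_cm_disconnect(cm_id, 1);     abrupt close, QP -> ERR
 *      iw_cm_disconnect(cm_id, 0);     orderly close, QP -> SQD (RDMAC CLOSING)
 */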
/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /*
         * Wait if we're currently in a connect or accept downcall. A
         * listening endpoint should never block here.
         */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* destroy the listening endpoint */
                ret = cm_id->device->iwcm->destroy_listen(cm_id);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
                (void)iwcm_modify_qp_err(cm_id_priv->qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called destroy before/without calling accept after
                 * receiving connection request event notification or
                 * returned non zero from the event callback function.
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_id->device->iwcm->reject(cm_id, NULL, 0);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
        default:
                BUG();
                break;
        }
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        (void)iwcm_deref_id(cm_id_priv);
}
/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

        destroy_cm_id(cm_id);

        wait_for_completion(&cm_id_priv->destroy_comp);

        free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
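/*
 * Usage sketch (illustrative; my_cm_handler, dev, ctx and my_listen_addr
 * are placeholders supplied by the consumer):
 *
 *      struct iw_cm_id *id = iw_create_cm_id(dev, my_cm_handler, ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      id->local_addr = my_listen_addr;
 *      ret = iw_cm_listen(id, 8);
 *
 * Each inbound request then shows up at my_cm_handler() as an
 * IW_CM_EVENT_CONNECT_REQUEST on a new, automatically created cm_id.
 */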
/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
                 const void *private_data,
                 u8 private_data_len)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->reject(cm_id, private_data,
                                          private_data_len);

        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);
        return ret;
}
EXPORT_SYMBOL(iw_cm_reject);
/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
                 struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        struct ib_qp *qp;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->accept(cm_id, iw_param);
        if (ret) {
                /* An error on accept precludes provider events */
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
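/*
 * Usage sketch (illustrative; my_qp and the ord/ird values are placeholders
 * chosen by the consumer).  A client handler typically accepts from the
 * IW_CM_EVENT_CONNECT_REQUEST callback:
 *
 *      struct iw_cm_conn_param param = {
 *              .ord = 1,
 *              .ird = 1,
 *              .qpn = my_qp->qp_num,
 *      };
 *      return iw_cm_accept(cm_id, &param);
 */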
/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
        struct ib_qp *qp;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, 4);
        if (ret)
                return ret;

        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }

        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->connect(cm_id, iw_param);
        if (ret) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
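/*
 * Usage sketch (illustrative; my_qp, pdata and pdata_len are placeholders).
 * The call returns once the provider has issued the connect; success or
 * failure of the connection itself arrives later as an
 * IW_CM_EVENT_CONNECT_REPLY:
 *
 *      struct iw_cm_conn_param param = {
 *              .ord = 1,
 *              .ird = 1,
 *              .qpn = my_qp->qp_num,
 *              .private_data = pdata,
 *              .private_data_len = pdata_len,
 *      };
 *      ret = iw_cm_connect(cm_id, &param);
 */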
/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                                struct iw_cm_event *iw_event)
{
        unsigned long flags;
        struct iw_cm_id *cm_id;
        struct iwcm_id_private *cm_id_priv;
        int ret;

        /*
         * The provider should never generate a connection request
         * event with a bad status.
         */
        BUG_ON(iw_event->status);

        cm_id = iw_create_cm_id(listen_id_priv->id.device,
                                listen_id_priv->id.cm_handler,
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
                goto out;

        cm_id->provider_data = iw_event->provider_data;
        cm_id->local_addr = iw_event->local_addr;
        cm_id->remote_addr = iw_event->remote_addr;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;

        /*
         * We could be destroying the listening id. If so, ignore this
         * upcall.
         */
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);

        ret = alloc_work_entries(cm_id_priv, 3);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }

        /* Call the client CM handler */
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                destroy_cm_id(cm_id);
                if (atomic_read(&cm_id_priv->refcount) == 0)
                        free_cm_id(cm_id_priv);
        }

out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
}
/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * We clear the CONNECT_WAIT bit here to allow the callback
         * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
         * from a callback handler is not allowed.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * Clear the connect wait bit so a callback function calling
         * iw_cm_disconnect will not wait and deadlock this thread
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == 0) {
                cm_id_priv->id.local_addr = iw_event->local_addr;
                cm_id_priv->id.remote_addr = iw_event->remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

        if (iw_event->private_data_len)
                kfree(iw_event->private_data);

        /* Wake up waiters on connect complete */
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
                cm_id_priv->state = IW_CM_STATE_CLOSING;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                            struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_DESTROYING:
                break;
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
static int process_event(struct iwcm_id_private *cm_id_priv,
                         struct iw_cm_event *iw_event)
{
        int ret = 0;

        switch (iw_event->event) {
        case IW_CM_EVENT_CONNECT_REQUEST:
                cm_conn_req_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_ESTABLISHED:
                ret = cm_conn_est_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_DISCONNECT:
                cm_disconnect_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CLOSE:
                ret = cm_close_handler(cm_id_priv, iw_event);
                break;
        default:
                BUG();
        }
        return ret;
}
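/*
 * State transitions driven by each provider event (as implemented by the
 * handlers above):
 *
 *      CONNECT_REQUEST  new passive cm_id -> CONN_RECV
 *      CONNECT_REPLY    CONN_SENT -> ESTABLISHED (or IDLE on failure)
 *      ESTABLISHED      CONN_RECV -> ESTABLISHED
 *      DISCONNECT       ESTABLISHED -> CLOSING
 *      CLOSE            ESTABLISHED/CLOSING -> IDLE
 */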
/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
        int ret = 0;
        int destroy_id;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
        while (!empty) {
                work = list_entry(cm_id_priv->work_list.next,
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
                levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                ret = process_event(cm_id_priv, &levent);
                if (ret) {
                        set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                        destroy_cm_id(&cm_id_priv->id);
                }
                BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
                destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY,
                                      &cm_id_priv->flags);
                if (iwcm_deref_id(cm_id_priv)) {
                        if (destroy_id) {
                                BUG_ON(!list_empty(&cm_id_priv->work_list));
                                free_cm_id(cm_id_priv);
                        }
                        return;
                }
                spin_lock_irqsave(&cm_id_priv->lock, flags);
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *            0 - the event was handled.
 *      -ENOMEM - the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
                            struct iw_cm_event *iw_event)
{
        struct iwcm_work *work;
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        work = get_work(cm_id_priv);
        if (!work) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;

        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
                ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
                }
        }

        atomic_inc(&cm_id_priv->refcount);
        if (list_empty(&cm_id_priv->work_list)) {
                list_add_tail(&work->list, &cm_id_priv->work_list);
                queue_work(iwcm_wq, &work->work);
        } else
                list_add_tail(&work->list, &cm_id_priv->work_list);
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
                                  struct ib_qp_attr *qp_attr,
                                  int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
                                           IB_ACCESS_REMOTE_READ;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
                                 struct ib_qp_attr *qp_attr,
                                 int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = 0;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
        case IB_QPS_RTR:
                ret = iwcm_init_qp_init_attr(cm_id_priv,
                                             qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = iwcm_init_qp_rts_attr(cm_id_priv,
                                            qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
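/*
 * Usage sketch (illustrative; qp is the consumer's ib_qp).  A consumer such
 * as the RDMA CM fills in the CM-mandated attributes and then applies them:
 *
 *      struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_RTS };
 *      int qp_attr_mask;
 *
 *      ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *      if (!ret)
 *              ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */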
static int __init iw_cm_init(void)
{
        iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
        if (!iwcm_wq)
                return -ENOMEM;

        return 0;
}

static void __exit iw_cm_cleanup(void)
{
        destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);