/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
static struct workqueue_struct *iwcm_wq;

struct iwcm_work {
	struct work_struct	work;
	struct iwcm_id_private	*cm_id;
	struct list_head	list;
	struct iw_cm_event	event;
	struct list_head	free_list;
};
static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
	{
		.procname	= "default_backlog",
		.data		= &default_backlog,
		.maxlen		= sizeof(default_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
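
/*
 * For reference, this policy maps onto the alloc_work_entries() call
 * sites below (a summary of this file, not new behavior):
 *
 *	iw_cm_listen():		alloc_work_entries(cm_id_priv, backlog)
 *	iw_cm_connect():	alloc_work_entries(cm_id_priv, 4)
 *	cm_conn_req_handler():	alloc_work_entries(cm_id_priv, 3)
 */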
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}
static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
		list_del(e);
		kfree(list_entry(e, struct iwcm_work, free_list));
	}
}
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}
/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}
static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}
/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		complete(&cm_id_priv->destroy_comp);
		return 1;
	}
	return 0;
}
static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	int cb_destroy;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	/*
	 * Test bit before deref in case the cm_id gets freed on another
	 * thread.
	 */
	cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
	if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
	}
}
static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
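
/*
 * Usage sketch (hypothetical consumer code, not part of this file):
 * "my_event_handler" and "my_ctx" are illustrative names supplied by
 * the ULP; the handler runs on the iwcm_wq thread for every event.
 *
 *	static int my_event_handler(struct iw_cm_id *cm_id,
 *				    struct iw_cm_event *event)
 *	{
 *		return 0;	- dispatch on event->event in real code
 *	}
 *
 *	struct iw_cm_id *cm_id;
 *
 *	cm_id = iw_create_cm_id(device, my_event_handler, my_ctx);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 */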
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for a user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
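
/*
 * Usage sketch (hypothetical consumer code, not part of this file):
 * the abrupt flag selects which QP transition is attempted above.
 *
 *	iw_cm_disconnect(cm_id, 0);	- graceful close, QP -> SQD
 *	iw_cm_disconnect(cm_id, 1);	- abrupt close, QP -> ERR
 */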
/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned non zero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	(void)iwcm_deref_id(cm_id_priv);
}
/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

	destroy_cm_id(cm_id);

	wait_for_completion(&cm_id_priv->destroy_comp);

	free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
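
/*
 * Illustrative note: an event handler that wants its cm_id destroyed
 * must not call iw_destroy_cm_id() itself; returning non-zero from the
 * handler makes cm_work_handler() destroy the id via the
 * IWCM_F_CALLBACK_DESTROY path instead. A hypothetical handler:
 *
 *	static int my_event_handler(struct iw_cm_id *cm_id,
 *				    struct iw_cm_event *event)
 *	{
 *		if (event->event == IW_CM_EVENT_CLOSE)
 *			return -ECONNRESET;	- IWCM destroys the cm_id
 *		return 0;
 *	}
 */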
/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	if (!backlog)
		backlog = default_backlog;

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
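
/*
 * Usage sketch (hypothetical consumer code, not part of this file):
 * the caller fills in cm_id->local_addr before listening; passing a
 * backlog of 0 selects the default_backlog sysctl value.
 *
 *	struct iw_cm_id *listen_id;
 *	int ret;
 *
 *	listen_id = iw_create_cm_id(device, my_event_handler, my_ctx);
 *	if (IS_ERR(listen_id))
 *		return PTR_ERR(listen_id);
 *	ret = iw_cm_listen(listen_id, 0);
 */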
/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);
/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
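
/*
 * Usage sketch (hypothetical consumer code, not part of this file):
 * accepting from inside the CONNECT_REQUEST callback. "my_qp" is an
 * assumed, already-created iWARP QP; iw_param.qpn lets the provider's
 * get_qp() method resolve it above.
 *
 *	static int my_event_handler(struct iw_cm_id *cm_id,
 *				    struct iw_cm_event *event)
 *	{
 *		struct iw_cm_conn_param iw_param = {
 *			.ord = 1,
 *			.ird = 1,
 *			.qpn = my_qp->qp_num,
 *		};
 *
 *		if (event->event == IW_CM_EVENT_CONNECT_REQUEST)
 *			return iw_cm_accept(cm_id, &iw_param);
 *		return 0;
 *	}
 */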
/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (ret) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
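
/*
 * Usage sketch (hypothetical consumer code, not part of this file):
 * the active side fills in the addresses on the cm_id, then connects.
 * "my_qp" is an assumed, already-created iWARP QP; the result arrives
 * later as a CONNECT_REPLY event on the handler.
 *
 *	struct iw_cm_conn_param iw_param = {
 *		.ord = 16,
 *		.ird = 16,
 *		.qpn = my_qp->qp_num,
 *	};
 *	int ret;
 *
 *	ret = iw_cm_connect(cm_id, &iw_param);
 */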
/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the cm_id is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->local_addr = iw_event->local_addr;
	cm_id->remote_addr = iw_event->remote_addr;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		destroy_cm_id(cm_id);
		if (atomic_read(&cm_id_priv->refcount) == 0)
			free_cm_id(cm_id_priv);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}
/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * frame mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.local_addr = iw_event->local_addr;
		cm_id_priv->id.remote_addr = iw_event->remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
}
/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}
/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;
	int destroy_id;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		ret = process_event(cm_id_priv, &levent);
		if (ret) {
			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
			destroy_cm_id(&cm_id_priv->id);
		}
		BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
		destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		if (iwcm_deref_id(cm_id_priv)) {
			if (destroy_id) {
				BUG_ON(!list_empty(&cm_id_priv->work_list));
				free_cm_id(cm_id_priv);
			}
			return;
		}
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
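
/*
 * Usage sketch (hypothetical consumer code, not part of this file):
 * the caller sets the target qp_state, lets the CM fill in the
 * attribute mask, and applies the result with ib_modify_qp().
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *	int ret;
 *
 *	qp_attr.qp_state = IB_QPS_RTS;
 *	ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(my_qp, &qp_attr, qp_attr_mask);
 */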
static int __init iw_cm_init(void)
{
	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
	if (!iwcm_wq)
		return -ENOMEM;

	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
						 iwcm_ctl_table);
	if (!iwcm_ctl_table_hdr) {
		pr_err("iw_cm: couldn't register sysctl paths\n");
		destroy_workqueue(iwcm_wq);
		return -ENOMEM;
	}

	return 0;
}

static void __exit iw_cm_cleanup(void)
{
	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
	destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);