// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
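
/*
 * Human-readable names for the call states and completion reasons, padded to
 * eight characters so that state reports (e.g. /proc/net/rxrpc/calls) stay
 * column-aligned.
 */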
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;
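
/*
 * Limiters that cap the number of calls in flight (1000 each for calls
 * initiated from userspace and from in-kernel services) so that a runaway
 * user cannot pin unbounded amounts of memory; see rxrpc_get_call_slot().
 */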
static struct semaphore rxrpc_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
static struct semaphore rxrpc_kernel_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);
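
/*
 * Poke a call so that the I/O thread attends to it: take a ref and queue the
 * call on its local endpoint's attend queue, then wake the I/O thread.
 */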
void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
{
	struct rxrpc_local *local = call->local;
	bool busy;

	if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
		spin_lock_bh(&local->lock);
		busy = !list_empty(&call->attend_link);
		trace_rxrpc_poke_call(call, busy, what);
		if (!busy) {
			rxrpc_get_call(call, rxrpc_call_get_poke);
			list_add_tail(&call->attend_link, &local->call_attend_q);
		}
		spin_unlock_bh(&local->lock);
		rxrpc_wake_up_io_thread(local);
	}
}
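
/*
 * The call's timer has expired: note the expiry and pass the event on to the
 * I/O thread unless the call is already complete.
 */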
static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (!__rxrpc_call_is_complete(call)) {
		trace_rxrpc_timer_expired(call, jiffies);
		rxrpc_poke_call(call, rxrpc_call_poke_timer);
	}
}
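
/*
 * Pull the call timer forward if the proposed expiry time is earlier than the
 * deadline it is currently set for.
 */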
void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

static void rxrpc_destroy_call(struct work_struct *);

/*
 * Find an extant call by its user-supplied ID
 * - called in process context with IRQs enabled
 * - the returned call has a reference held for the caller
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_get_sendmsg);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, refcount_read(&call->ref));
	return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->destroyer, rxrpc_destroy_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	INIT_LIST_HEAD(&call->attend_link);
	INIT_LIST_HEAD(&call->tx_sendmsg);
	INIT_LIST_HEAD(&call->tx_buffer);
	skb_queue_head_init(&call->recvmsg_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->tx_lock);
	refcount_set(&call->ref, 1);
	call->debug_id = debug_id;
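	/* A total Tx length of -1 indicates "not yet known". */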
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;
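	/* The Rx ack window is kept in a single u64: the bottom 32 bits are
	 * the window base and the top 32 bits the window top; both start at
	 * seq 1, i.e. an empty window.
	 */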
	atomic64_set(&call->ackr_window, 0x100000001ULL);
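
	/* Poison the rbtree linkage until the call is assigned a user ID. */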
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
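
	/* Scale the initial congestion window with the sender max segment
	 * size, following the initial-window rules of RFC 5681.
	 */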
	if (RXRPC_TX_SMSS > 2190)
		call->cong_cwnd = 2;
	else if (RXRPC_TX_SMSS > 1095)
		call->cong_cwnd = 3;
	else
		call->cong_cwnd = 4;
	call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  struct rxrpc_conn_parameters *cp,
						  struct rxrpc_call_params *p,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;
	int ret;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;
	call->dest_srx = *srx;
	call->interruptibility = p->interruptibility;
	call->tx_total_len = p->tx_total_len;
	call->key = key_get(cp->key);
	call->local = rxrpc_get_local(cp->local, rxrpc_local_get_call);
	call->security_level = cp->security_level;
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);
	if (cp->exclusive)
		__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);

	ret = rxrpc_init_client_call_security(call);
	if (ret < 0) {
		rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
		rxrpc_put_call(call, rxrpc_call_put_discard_error);
		return ERR_PTR(ret);
	}

	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN);

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 p->user_call_ID, rxrpc_call_new_client);

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;
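
	/* Park every timeout at the far future; the real deadlines are set as
	 * the call progresses.
	 */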
	call->delay_ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->keepalive_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}

/*
 * Start the process of connecting a call.  We obtain a peer and a connection
 * bundle, but the actual association of a call with a connection is offloaded
 * to the I/O thread to simplify locking.
 */
static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_local *local = call->local;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp);
	if (!call->peer)
		goto error;

	ret = rxrpc_look_up_bundle(call, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
	rxrpc_get_call(call, rxrpc_call_get_io_thread);
	spin_lock(&local->client_call_lock);
	list_add_tail(&call->wait_link, &local->new_client_calls);
	spin_unlock(&local->client_call_lock);
	rxrpc_wake_up_io_thread(local);
	return 0;

error:
	__set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	return ret;
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter) {
		release_sock(&rx->sk);
		return ERR_PTR(-ERESTARTSYS);
	}

	call = rxrpc_alloc_client_call(rx, srx, cp, p, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
			 rxrpc_call_see_userid_exists);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_userid_exists);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release.  However, we might now race with recvmsg() when
	 * its completion notifies the socket.  Return 0 from sys_sendmsg()
	 * and leave the error to recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
			 rxrpc_call_see_connect_failed);
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->dest_srx.srx_service = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->cong_tstamp = skb->tstamp;

	__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);

	spin_lock(&conn->state_lock);

	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
	case RXRPC_CONN_SERVICE_CHALLENGING:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
		break;
	case RXRPC_CONN_SERVICE:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
		break;

	case RXRPC_CONN_ABORTED:
		rxrpc_set_call_completion(call, conn->completion,
					  conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}

	rxrpc_get_call(call, rxrpc_call_get_io_thread);

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	conn->channels[chan].call = call;
	spin_unlock(&conn->state_lock);

	spin_lock(&conn->peer->lock);
	hlist_add_head(&call->error_link, &conn->peer->error_targets);
	spin_unlock(&conn->peer->lock);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	if (call) {
		int r = refcount_read(&call->ref);

		trace_rxrpc_call(call->debug_id, r, 0, why);
	}
}

struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *call,
				      enum rxrpc_call_trace why)
{
	int r;

	if (!call || !__refcount_inc_not_zero(&call->ref, &r))
		return NULL;
	trace_rxrpc_call(call->debug_id, r + 1, 0, why);
	return call;
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	int r;

	__refcount_inc(&call->ref, &r);
	trace_rxrpc_call(call->debug_id, r + 1, 0, why);
}

/*
 * Clean up the Rx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	skb_queue_purge(&call->recvmsg_queue);
	skb_queue_purge(&call->rx_oos_queue);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	bool put = false, putu = false;

	_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 call->flags, rxrpc_call_see_release);

	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	rxrpc_put_call_slot(call);

	/* Make sure we don't get any more notifications */
	write_lock(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put_unnotify);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		putu = true;
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (putu)
		rxrpc_put_call(call, rxrpc_call_put_userid);

	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
				    rxrpc_abort_call_sock_release_tba);
		rxrpc_put_call(call, rxrpc_call_put_release_sock_tba);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_get_release_sock);
		rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
				    rxrpc_abort_call_sock_release);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_release_sock);
	}

	_leave("");
}

/*
 * Drop a reference on a call and clean the call up if that was the last one.
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	struct rxrpc_net *rxnet = call->rxnet;
	unsigned int debug_id = call->debug_id;
	bool dead;
	int r;

	ASSERT(call != NULL);

	dead = __refcount_dec_and_test(&call->ref, &r);
	trace_rxrpc_call(debug_id, r - 1, 0, why);
	if (dead) {
		ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			spin_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			spin_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Free up the call under RCU.
 */
static void rxrpc_rcu_free_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
	struct rxrpc_net *rxnet = READ_ONCE(call->rxnet);

	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer);
	struct rxrpc_txbuf *txb;
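
	/* Make sure any concurrently-running timer handler has finished
	 * before starting to tear the call down.
	 */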
	del_timer_sync(&call->timer);

	rxrpc_cleanup_ring(call);
	while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}
	while ((txb = list_first_entry_or_null(&call->tx_buffer,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}

	rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
	rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
	rxrpc_deactivate_bundle(call->bundle);
	rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call);
	rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
	rxrpc_put_local(call->local, rxrpc_local_put_call);
	call_rcu(&call->rcu, rxrpc_rcu_free_call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	del_timer(&call->timer);

	if (rcu_read_lock_held())
		/* Can't use the rxrpc workqueue as we need to cancel/flush
		 * something that may be running/waiting there.
		 */
		schedule_work(&call->destroyer);
	else
		rxrpc_destroy_call(&call->destroyer);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		spin_lock(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call, rxrpc_call_see_zap);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, refcount_read(&call->ref),
			       rxrpc_call_states[__rxrpc_call_state(call)],
			       call->flags, call->events);

			spin_unlock(&rxnet->call_lock);
			cond_resched();
			spin_lock(&rxnet->call_lock);
		}

		spin_unlock(&rxnet->call_lock);
	}
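
	/* Release what appears to be the initial count set when the namespace
	 * was created (see rxrpc_init_net()) and wait for all calls to die.
	 */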
	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}