/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max = tmp + 1;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}

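	/* Note: each ring's producer advances the _head index with
	 * smp_store_release() only after the new entry has been written; the
	 * consumer in rxrpc_alloc_incoming_call() pairs this with
	 * smp_load_acquire() on the same index, so it never observes an
	 * uninitialised slot.
	 */
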
	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock(&rxrpc_call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

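/* An illustrative example of the ring arithmetic used above: with
 * RXRPC_BACKLOG_MAX = 32, head = 1 and tail = 30, CIRC_CNT(1, 30, 32) is
 * (1 - 30) & 31 = 3 entries in the ring, and the next insertion advances
 * head to (1 + 1) & 31 = 2.  The ring size must be a power of two for the
 * masking to work.
 */
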
/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}

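/* Note: for a userspace service the backlog size comes from listen() (see
 * rxrpc_listen()), which sets sk_max_ack_backlog, subject to the rxrpc
 * max_backlog sysctl, and then calls rxrpc_service_prealloc(); e.g.
 * listen(fd, 10) keeps up to ten calls, with conns and peers to match,
 * charged on the socket.
 */
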
/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

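	/* (Taking and dropping incoming_lock acts as a barrier: any call
	 * setup running in rxrpc_new_incoming_call() holds that lock, so once
	 * we've cycled it, no such setup is still touching the backlog.)
	 */
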
	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxrpc_connection_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxrpc_connection_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #peers >= #conns >= #calls must hold true in the pool: calls are
	 * consumed fastest, so there must always be at least as many conns
	 * available as calls, and at least as many peers as conns.
	 */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_new_incoming_connection(conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;

	_enter("");

	/* Get the socket providing the service */
	hlist_for_each_entry_rcu_bh(rx, &local->services, listen_link) {
		if (rx->srx.srx_service == sp->hdr.serviceId)
			goto found_service;
	}

	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	rxrpc_get_call(call, rxrpc_call_got);
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

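/* Userspace reaches rxrpc_accept_call() via sendmsg() on the listening
 * socket.  A rough, illustrative sketch of the control messages involved
 * (consult the rxrpc UAPI header for the authoritative definitions):
 *
 *	unsigned long id = 1;	// tag to assign to the accepted call
 *	char ctl[CMSG_SPACE(sizeof(id)) + CMSG_SPACE(0)];
 *	struct msghdr msg = {
 *		.msg_control	= ctl,
 *		.msg_controllen	= sizeof(ctl),
 *	};
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	c->cmsg_level	= SOL_RXRPC;
 *	c->cmsg_type	= RXRPC_USER_CALL_ID;
 *	c->cmsg_len	= CMSG_LEN(sizeof(id));
 *	memcpy(CMSG_DATA(c), &id, sizeof(id));
 *	c = CMSG_NXTHDR(&msg, c);
 *	c->cmsg_level	= SOL_RXRPC;
 *	c->cmsg_type	= RXRPC_ACCEPT;
 *	c->cmsg_len	= CMSG_LEN(0);
 *	sendmsg(fd, &msg, 0);
 */
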
/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);

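/* A minimal sketch of how a kernel service might charge its socket,
 * assuming hypothetical callbacks my_notify_rx() and my_attach_call()
 * supplied by that service:
 *
 *	for (i = 0; i < n; i++) {
 *		ret = rxrpc_kernel_charge_accept(srv_socket,
 *						 my_notify_rx,
 *						 my_attach_call,
 *						 (unsigned long)i,
 *						 GFP_KERNEL);
 *		if (ret < 0)
 *			break;
 *	}
 */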