/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

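/*
 * The peer, connection and call backlog rings below follow the
 * <linux/circ_buf.h> convention: the producer advances the head index, the
 * consumer advances the tail index, and CIRC_CNT(head, tail, size), defined
 * there as ((head) - (tail)) & ((size) - 1), gives the number of occupied
 * slots.  The index masking requires RXRPC_BACKLOG_MAX to be a power of two.
 */
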
/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

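	/* max is now the number of calls the ring will hold once this one has
	 * been added; the peer and conn rings need only be topped up to that
	 * level.
	 */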
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}
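	/* The store-release that publishes the new head index pairs with the
	 * load-acquire of the head in rxrpc_alloc_incoming_call(), so a
	 * consumer that sees the new head also sees the filled slot; the conn
	 * and call rings below use the same pattern.
	 */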

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

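	/* At this point a kernel-service call holds three refs: one for the
	 * kernel service (rxrpc_call_got_kernel), one for the user_call_ID
	 * tree (rxrpc_call_got_userid) and the allocation ref that the
	 * backlog ring will retain.
	 */
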
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a call is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

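	/* Fill the ring until rxrpc_service_prealloc_one() reports it full
	 * (-ENOBUFS) or runs out of memory; either way, the socket keeps
	 * whatever was successfully charged.
	 */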
	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
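	/* Cycling through the lock is sufficient: rxrpc_new_incoming_call()
	 * runs entirely under rx->incoming_lock, so no call can still be
	 * consuming the rings we're about to tear down.
	 */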

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

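	/* No further synchronisation is needed on the tail side: the caller
	 * holds rx->incoming_lock, so there is only ever one consumer per
	 * ring at a time.
	 */
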
	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
248f219c | 334 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, |
0099dc58 | 335 | struct rxrpc_sock *rx, |
248f219c | 336 | struct sk_buff *skb) |
17926a79 | 337 | { |
248f219c | 338 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
c1e15b49 | 339 | struct rxrpc_connection *conn; |
d7b4c24f | 340 | struct rxrpc_peer *peer = NULL; |
17926a79 | 341 | struct rxrpc_call *call; |
17926a79 DH |
342 | |
343 | _enter(""); | |
344 | ||
248f219c | 345 | spin_lock(&rx->incoming_lock); |
210f0353 DH |
346 | if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED || |
347 | rx->sk.sk_state == RXRPC_CLOSE) { | |
a25e21f0 | 348 | trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber, |
248f219c | 349 | sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); |
ece64fec | 350 | skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; |
248f219c DH |
351 | skb->priority = RX_INVALID_OPERATION; |
352 | _leave(" = NULL [close]"); | |
353 | call = NULL; | |
354 | goto out; | |
17926a79 | 355 | } |

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

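	/* The accept-queue accounting above pairs with sk_acceptq_removed()
	 * in rxrpc_accept_call() and rxrpc_reject_call() below.
	 */
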
	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

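	/* Note that for a new service connection, the case above is where the
	 * security challenge/response exchange is initiated, via the
	 * connection's event queue.
	 */
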
	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * Handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point, using a separate iterator so as
	 * not to clobber the call we just dequeued.
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		struct rxrpc_call *xcall;

		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

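	/* A match in this second walk would mean the user ID appeared while
	 * we held the socket lock, which the first walk ruled out; hence the
	 * BUG() above.
	 */
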
	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
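
/*
 * Illustrative use (a sketch, not code from this file): a kernel service
 * would typically keep the backlog topped up by charging calls in a loop,
 * each tagged with a fresh context pointer it can recover later.  The
 * my_*() names here are hypothetical; see fs/afs/rxrpc.c for a real user.
 *
 *	for (;;) {
 *		struct my_call_ctx *ctx = my_alloc_ctx(GFP_KERNEL);
 *
 *		if (!ctx)
 *			break;
 *		if (rxrpc_kernel_charge_accept(sock, my_notify_rx,
 *					       my_attach_call,
 *					       (unsigned long)ctx,
 *					       GFP_KERNEL,
 *					       atomic_inc_return(&rxrpc_debug_id)) < 0)
 *			break;
 *	}
 */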