// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

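/*
 * Substituted for a kernel service's notify_rx hook when its preallocated
 * calls are discarded, so that any late notification is quietly swallowed
 * rather than calling back into the departing service.
 */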
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

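	/* Each backlog ring is a power-of-two circular buffer with a single
	 * producer and a single consumer: the smp_store_release() on each
	 * head index below pairs with the smp_load_acquire() in
	 * rxrpc_alloc_incoming_call(), making the new slot contents visible
	 * before the advanced head index is.
	 */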
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer;

		peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, with a user ID preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
	__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
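	/* (The INITIAL_PING event makes the I/O thread send a ping ACK as
	 * soon as the call begins, giving an early RTT sample for the peer.)
	 */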

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 user_call_ID, rxrpc_call_new_prealloc_service);

	write_lock(&rx->call_lock);

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	call->user_call_ID = user_call_ID;
	call->notify_rx = notify_rx;
	if (user_attach_call) {
		rxrpc_get_call(call, rxrpc_call_get_kernel_service);
		user_attach_call(call, user_call_ID);
	}

	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Allocate the preallocation buffers for incoming service calls.  These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	return 0;
}
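
/* The charging itself is done one call at a time, either from userspace via
 * the RXRPC_CHARGE_ACCEPT sendmsg() control message (which lands in
 * rxrpc_user_charge_accept() below) or by a kernel service calling
 * rxrpc_kernel_charge_accept().
 */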

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock(&rx->incoming_lock);
	spin_unlock(&rx->incoming_lock);
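	/* (Merely taking and dropping the lock is deliberate: anyone still
	 * consuming the old backlog in rxrpc_new_incoming_call() does so
	 * under incoming_lock, so once we pass this point the rings are ours
	 * alone.)
	 */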

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];

		rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_peer);
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];

		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];

		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct sockaddr_rxrpc *peer_srx,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

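	/* This is the consumer side of the backlog rings: each
	 * smp_load_acquire() on a head index pairs with the
	 * smp_store_release() in rxrpc_service_prealloc_one(), ensuring the
	 * slot contents are seen before the head that publishes them.
	 */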
	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			peer->srx = *peer_srx;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
		conn->peer = peer;
		rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn);
		rxrpc_new_incoming_connection(rx, conn, sec, skb);
	} else {
		rxrpc_get_connection(conn, rxrpc_conn_get_service_conn);
		atomic_inc(&conn->active);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call, rxrpc_call_see_accept);
	call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
	call->dest_srx = peer->srx;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	call->tx_last_sent = ktime_get_real();
	return call;
}

/*
 * Set up a new incoming call.  Called from the I/O thread.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return false.
 */
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
			     struct rxrpc_peer *peer,
			     struct rxrpc_connection *conn,
			     struct sockaddr_rxrpc *peer_srx,
			     struct sk_buff *skb)
{
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = NULL;
	struct rxrpc_sock *rx;

	_enter("");

	/* Don't set up a call for anything other than a DATA packet. */
	if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
		return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);

	read_lock(&local->services_lock);

	/* Weed out packets to services we're not offering.  Packets that would
	 * begin a call are explicitly rejected and the rest are just
	 * discarded.
	 */
	rx = local->service;
	if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
		    sp->hdr.serviceId != rx->second_service)) {
		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
		    sp->hdr.seq == 1)
			goto unsupported_service;
		goto discard;
	}

	if (!conn) {
		sec = rxrpc_get_incoming_security(rx, skb);
		if (!sec)
			goto unsupported_security;
	}

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
				   RX_INVALID_OPERATION, -ESHUTDOWN);
		goto no_call;
	}

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, peer_srx,
					 skb);
	if (!call) {
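		/* The backlog is empty - mark the packet so that the caller
		 * will reject it with a BUSY response.
		 */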
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

248f219c DH |
386 | /* Make the call live. */ |
387 | rxrpc_incoming_call(rx, call, skb); | |
388 | conn = call->conn; | |
17926a79 | 389 | |
248f219c DH |
390 | if (rx->notify_new_call) |
391 | rx->notify_new_call(&rx->sk, call, call->user_call_ID); | |
17926a79 | 392 | |
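	/* If the connection is still unsecured, get the connection-event
	 * processor to send a security challenge before any data is passed
	 * up to the service.
	 */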
	spin_lock(&conn->state_lock);
	if (conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge);
	}
	spin_unlock(&conn->state_lock);

	spin_unlock(&rx->incoming_lock);
	read_unlock(&local->services_lock);

	if (hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}

	_leave(" = %p{%d}", call, call->debug_id);
	rxrpc_input_call_event(call, skb);
	rxrpc_put_call(call, rxrpc_call_put_input);
	return true;

unsupported_service:
	read_unlock(&local->services_lock);
	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
				  RX_INVALID_OPERATION, -EOPNOTSUPP);
unsupported_security:
	read_unlock(&local->services_lock);
	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
				  RX_INVALID_OPERATION, -EKEYREJECTED);
no_call:
	spin_unlock(&rx->incoming_lock);
	read_unlock(&local->services_lock);
	_leave(" = f [%u]", skb->mark);
	return false;
discard:
	read_unlock(&local->services_lock);
	return true;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (rx->sk.sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
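
/* Illustrative sketch (not part of this file): a kernel service typically
 * keeps the backlog charged from its own accept path.  The names my_sock,
 * my_notify_rx, my_attach_call, obj and my_debug_id below are hypothetical.
 *
 *	ret = rxrpc_kernel_charge_accept(my_sock, my_notify_rx,
 *					 my_attach_call, (unsigned long)obj,
 *					 GFP_KERNEL, my_debug_id);
 *
 * (Repeating this returns -ENOBUFS once the backlog ring is full.)
 */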