Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
45025bce | 2 | /* RxRPC virtual connection handler, common bits. |
17926a79 | 3 | * |
45025bce | 4 | * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved. |
17926a79 | 5 | * Written by David Howells (dhowells@redhat.com) |
17926a79 DH |
6 | */ |
7 | ||
9b6d5398 JP |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9 | ||
17926a79 | 10 | #include <linux/module.h> |
5a0e3ad6 | 11 | #include <linux/slab.h> |
17926a79 DH |
12 | #include <linux/net.h> |
13 | #include <linux/skbuff.h> | |
17926a79 DH |
14 | #include "ar-internal.h" |
15 | ||
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60; /* 10 minutes idle */
/* Shorter expiry used by the reaper when conn->local->service_closed is set. */
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

/* Forward declarations: destructor work item and reap-timer helper defined below. */
static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at);
45025bce | 25 | |
/*
 * Connection timer expired: offload all work to the connection's work
 * processor rather than doing anything here in timer context.
 */
static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_queue_conn(conn, rxrpc_conn_queue_timer);
}
33 | ||
/*
 * Allocate a new connection record and initialise its lists, timer, work
 * items and locks.  Returns the new connection or NULL on allocation
 * failure.  Protocol parameters are left for the caller to fill in.
 */
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
						gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		/* Timer fires rxrpc_connection_timer(), which requeues the
		 * processor work item.
		 */
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, rxrpc_process_connection);
		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->rxnet = rxnet;
		/* Start with the null security ops; a real class may be
		 * installed later.
		 */
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}
63 | ||
/*
 * Look up a client connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
							  struct sockaddr_rxrpc *srx,
							  struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* Look up client connections by connection ID alone as their IDs are
	 * unique for this machine.
	 */
	conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
	if (!conn || refcount_read(&conn->ref) == 0) {
		/* A zero refcount means the conn is already being torn down;
		 * treat it as absent.
		 */
		_debug("no conn");
		goto not_found;
	}

	/* The ID matched; now confirm epoch and local endpoint. */
	if (conn->proto.epoch != sp->hdr.epoch ||
	    conn->local != local)
		goto not_found;

	/* Finally confirm the remote transport address matches the peer
	 * recorded on the connection.
	 */
	peer = conn->peer;
	switch (srx->transport.family) {
	case AF_INET:
		if (peer->srx.transport.sin.sin_port !=
		    srx->transport.sin.sin_port ||
		    peer->srx.transport.sin.sin_addr.s_addr !=
		    srx->transport.sin.sin_addr.s_addr)
			goto not_found;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (peer->srx.transport.sin6.sin6_port !=
		    srx->transport.sin6.sin6_port ||
		    memcmp(&peer->srx.transport.sin6.sin6_addr,
			   &srx->transport.sin6.sin6_addr,
			   sizeof(struct in6_addr)) != 0)
			goto not_found;
		break;
#endif
	default:
		BUG();
	}

	_leave(" = %p", conn);
	return conn;

not_found:
	_leave(" = NULL");
	return NULL;
}
128 | ||
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	/* The low bits of the call ID select the channel slot. */
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			/* Success is replayed as a final ACK. */
			chan->last_seq = call->rx_highest_seq;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			/* Any other completion is replayed as a generic
			 * RX_CALL_DEAD abort.
			 */
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		/* Sync with rxrpc_conn_retransmit(). */
		smp_wmb();
		/* Retire the call ID on this channel: the one just completed
		 * becomes last_call and call_id advances to the counter value.
		 */
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}
172 | ||
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	rxrpc_see_call(call, rxrpc_call_see_disconnected);

	/* Feed the call's final slow-start threshold back to the peer record
	 * so later calls to the same peer start from it.
	 */
	call->peer->cong_ssthresh = call->cong_ssthresh;

	/* Unhook from the peer's error-distribution list, if hooked. */
	if (!hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

	if (rxrpc_is_client_call(call)) {
		rxrpc_disconnect_client_call(conn->bundle, call);
	} else {
		/* Service calls clear their channel under the bundle's
		 * channel_lock, as required by __rxrpc_disconnect_call().
		 */
		spin_lock(&conn->bundle->channel_lock);
		__rxrpc_disconnect_call(conn, call);
		spin_unlock(&conn->bundle->channel_lock);

		conn->idle_timestamp = jiffies;
		/* Last user gone: arm the service-conn reaper.
		 * NOTE(review): the reaper computes expiry as
		 * rxrpc_connection_expiry * HZ, but here the raw value is
		 * added to jiffies - the timer may fire early (harmless, the
		 * reaper rechecks); confirm this is intentional.
		 */
		if (atomic_dec_and_test(&conn->active))
			rxrpc_set_service_reap_timer(conn->rxnet,
						     jiffies + rxrpc_connection_expiry);
	}

	rxrpc_put_call(call, rxrpc_call_put_io_thread);
}
207 | ||
/*
 * Queue a connection's work processor, provided the connection is still
 * active (active count >= 0).  No reference is taken here; the queueing is
 * just traced via rxrpc_see_connection().
 */
void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	if (atomic_read(&conn->active) >= 0 &&
	    rxrpc_queue_work(&conn->processor))
		rxrpc_see_connection(conn, why);
}
218 | ||
219 | /* | |
220 | * Note the re-emergence of a connection. | |
221 | */ | |
7fa25105 DH |
222 | void rxrpc_see_connection(struct rxrpc_connection *conn, |
223 | enum rxrpc_conn_trace why) | |
363deeab | 224 | { |
363deeab | 225 | if (conn) { |
7fa25105 | 226 | int r = refcount_read(&conn->ref); |
363deeab | 227 | |
7fa25105 | 228 | trace_rxrpc_conn(conn->debug_id, r, why); |
363deeab DH |
229 | } |
230 | } | |
231 | ||
/*
 * Get a ref on a connection and trace the new count.  The connection must
 * not already be dead (use rxrpc_get_connection_maybe() for that case).
 */
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
					      enum rxrpc_conn_trace why)
{
	int r;

	__refcount_inc(&conn->ref, &r);
	/* r is the pre-increment value, hence r + 1 in the trace. */
	trace_rxrpc_conn(conn->debug_id, r + 1, why);
	return conn;
}
244 | ||
245 | /* | |
246 | * Try to get a ref on a connection. | |
247 | */ | |
248 | struct rxrpc_connection * | |
7fa25105 DH |
249 | rxrpc_get_connection_maybe(struct rxrpc_connection *conn, |
250 | enum rxrpc_conn_trace why) | |
363deeab | 251 | { |
a0575429 | 252 | int r; |
363deeab DH |
253 | |
254 | if (conn) { | |
a0575429 | 255 | if (__refcount_inc_not_zero(&conn->ref, &r)) |
7fa25105 | 256 | trace_rxrpc_conn(conn->debug_id, r + 1, why); |
363deeab DH |
257 | else |
258 | conn = NULL; | |
259 | } | |
260 | return conn; | |
261 | } | |
262 | ||
3d18cbb7 DH |
263 | /* |
264 | * Set the service connection reap timer. | |
265 | */ | |
266 | static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet, | |
267 | unsigned long reap_at) | |
268 | { | |
269 | if (rxnet->live) | |
270 | timer_reduce(&rxnet->service_conn_reap_timer, reap_at); | |
271 | } | |
272 | ||
/*
 * Final, RCU-deferred destruction of a connection: free the record and
 * account it against the namespace's connection count, waking anyone in
 * rxrpc_destroy_all_connections() waiting for the count to hit zero.
 */
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);
	struct rxrpc_net *rxnet = conn->rxnet;

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
			 rxrpc_conn_free);
	kfree(conn);

	/* Wake the namespace-destruction waiter once the last conn is gone. */
	if (atomic_dec_and_test(&rxnet->nr_conns))
		wake_up_var(&rxnet->nr_conns);
}
291 | ||
/*
 * Clean up a dead connection.  Runs either as the conn->destructor work item
 * or called directly from rxrpc_put_connection() when it is safe to do so.
 * Frees the record via RCU when done.
 */
static void rxrpc_clean_up_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, destructor);
	struct rxrpc_net *rxnet = conn->rxnet;

	/* By this point no channel may still hold a call and the conn must be
	 * off the client-cache list.
	 */
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	del_timer_sync(&conn->timer);
	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
	del_timer_sync(&conn->timer);

	/* Remove from the /proc listing. */
	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_kill_client_conn(conn);

	/* Release security state and everything the conn pins. */
	conn->security->clear(conn);
	key_put(conn->key);
	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
333 | ||
/*
 * Drop a ref on a connection.  On the final put, stop the timer and work
 * item and run (or defer) the destructor.  NULL conn is a no-op.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (!conn)
		return;

	/* Snapshot the debug ID first as conn may be freed once dead. */
	debug_id = conn->debug_id;
	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, r - 1, why);
	if (dead) {
		del_timer(&conn->timer);
		cancel_work(&conn->processor);

		if (in_softirq() || work_busy(&conn->processor) ||
		    timer_pending(&conn->timer))
			/* Can't use the rxrpc workqueue as we need to cancel/flush
			 * something that may be running/waiting there.
			 */
			schedule_work(&conn->destructor);
		else
			/* Safe to tear down synchronously in this context. */
			rxrpc_clean_up_connection(&conn->destructor);
	}
}
364 | ||
/*
 * Reap dead service connections: walk the namespace's service-conn list,
 * claim any that have been idle past their expiry, unpublish them and drop
 * the ref that kept them on the list.  Reschedules itself for the earliest
 * not-yet-expired conn found.
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	int active;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;	/* "no reschedule needed" sentinel */

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->active), >=, 0);
		/* Still in use - leave it alone. */
		if (likely(atomic_read(&conn->active) > 0))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		/* Expiry only applies whilst the namespace and local endpoint
		 * are live; otherwise everything idle is reaped immediately.
		 */
		if (rxnet->live && !conn->local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { a=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->active),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				/* Not yet expired - track the soonest expiry
				 * so we can rearm the timer for it.
				 */
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The activity count sits at 0 whilst the conn is unused on
		 * the list; we reduce that to -1 to make the conn unavailable.
		 */
		active = 0;
		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
			continue;
		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);

		/* Client conns must never appear on the service list. */
		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	/* Dispose of the claimed conns outside the lock. */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->active), ==, -1);
		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
	}

	_leave("");
}
442 | ||
/*
 * Preemptively destroy all the service connection records rather than
 * waiting for them to time out.  Called on namespace teardown; BUGs if any
 * connection leaks past the forced reap.
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	/* NOTE(review): appears to drop a bias on nr_conns taken when the
	 * namespace was set up, so the count can reach zero below - confirm
	 * against the net-init code.
	 */
	atomic_dec(&rxnet->nr_conns);
	rxrpc_destroy_all_client_connections(rxnet);

	/* Run the reaper once more by hand and wait for it to finish. */
	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	/* Anything still on the list at this point is a leak. */
	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}