rxrpc: Fix missing start of call timeout
[linux-2.6-block.git] net/rxrpc/local_object.c
/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		atomic_set(&local->usage, 1);
		local->rxnet = rxnet;
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
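		/* Local endpoints are keyed on transport address only, so
		 * clear the service ID (rxrpc_local_cmp_key() ignores it).
		 */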
		local->srx.srx_service = 0;
		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	for (cursor = rxnet->local_endpoints.next;
	     cursor != &rxnet->local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

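	/* The endpoint list is kept in rxrpc_local_cmp_key() order; inserting
	 * before the cursor (where the scan stopped) preserves that ordering.
	 */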
	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_inc_return(&local->usage);
	trace_rxrpc_local(local, rxrpc_local_got, n, here);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (local) {
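		/* __atomic_add_unless() only increments if the count isn't
		 * already 0 and returns the old value, so n > 0 means we
		 * successfully obtained a ref.
		 */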
		int n = __atomic_add_unless(&local->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
		else
			local = NULL;
	}
	return local;
}

/*
 * Queue a local endpoint.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

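	/* rxrpc_queue_work() returns false if the work item was already
	 * pending, so the trace only fires when we actually queue it.
	 */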
	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(local, rxrpc_local_queued,
				  atomic_read(&local->usage), here);
}

/*
 * A local endpoint reached its end of life.
 */
static void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
	rxrpc_queue_work(&local->processor);
}

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (local) {
		n = atomic_dec_return(&local->usage);
		trace_rxrpc_local(local, rxrpc_local_put, n, here);

		if (n == 0)
			__rxrpc_put_local(local);
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	mutex_lock(&rxnet->local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}

/*
 * Process events on an endpoint
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	trace_rxrpc_local(local, rxrpc_local_processing,
			  atomic_read(&local->usage), NULL);

	do {
		again = false;
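		/* The final ref kicks the processor (see __rxrpc_put_local()),
		 * so a zero usage count here means it's time to tear down.
		 */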
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

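	/* Let any outstanding endpoint work, including destructions queued by
	 * the final put, run to completion before checking for leaks.
	 */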
	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		list_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}