dccp/tcp: Fixup bhash2 bucket when connect() fails.
[linux-2.6-block.git] / net / dccp / proto.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

DEFINE_PER_CPU(unsigned int, dccp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		fallthrough;
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	inet_sk_set_state(sk, state);
}

EXPORT_SYMBOL_GPL(dccp_set_state);
static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
static void dccp_sk_destruct(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_tx_ccid = NULL;
	inet_sock_destruct(sk);
}
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	sk->sk_destruct		= dccp_sk_destruct;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	inet_bhash2_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk_error_report(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
__poll_t dccp_poll(struct file *file, struct socket *sock,
		   poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sock, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic, and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = EPOLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCOUTQ: {
		int amount = sk_wmem_alloc_get(sk);
		/* Using sk_wmem_alloc here because sk_wmem_queued is not used
		 * by DCCP and is always 0, as in UDP.
		 */

		rc = put_user(amount, (int __user *)arg);
	}
		break;
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
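/*
 * Usage sketch (userspace, not part of this file): both commands take a
 * pointer to an int, like other socket ioctls, e.g.
 *
 *	int pending;
 *	ioctl(fd, SIOCINQ, &pending);	// length of the packet at the head of
 *					// the receive queue (all that can be read)
 *	ioctl(fd, SIOCOUTQ, &pending);	// bytes still held in the transmit path
 */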
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   sockptr_t optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_sockptr_offset(sl->dccpsl_list, optval,
				sizeof(service), optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
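/*
 * Usage sketch (userspace, not part of this file): applications normally set
 * a service code before connect()/listen(); the value is a __be32 in network
 * byte order, and a longer optval supplies a list of additional codes after
 * the first one. The code 42 below is an arbitrary example:
 *
 *	__be32 service = htonl(42);
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   &service, sizeof(service));
 */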
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}
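/*
 * Worked example for the list built above (added here for illustration, not
 * in the original source): for cscov = 12 the function computes len = 4 and
 * fills list = {12, 13, 14, 15}, which is then offered to the peer as the
 * preference list for DCCPF_MIN_CSUM_COVER; since both ends list their
 * acceptable values lowest-first, negotiation converges on the smallest
 * coverage value the two lists share.
 */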
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				sockptr_t optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_sockptr(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}
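/*
 * Usage sketch (userspace, not part of this file): CCIDs are requested as a
 * preference-ordered byte array; DCCP_SOCKOPT_CCID registers the list for
 * both the TX and RX halves, while DCCP_SOCKOPT_TX_CCID/RX_CCID cover one
 * direction each. E.g., to prefer CCID-2 but accept CCID-3 (assuming both
 * are built in, cf. DCCP_SOCKOPT_AVAILABLE_CCIDS):
 *
 *	__u8 ccids[] = { 2, 3 };
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, ccids, sizeof(ccids));
 */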
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
		sockptr_t optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		    unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);
static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg;

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
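/*
 * Usage sketch (userspace, not part of this file): with a queueing policy
 * that accepts priorities (selected via DCCP_SOCKOPT_QPOLICY_ID), a
 * per-packet priority can be attached to sendmsg() as a control message:
 *
 *	__u32 prio = 1;
 *	char cbuf[CMSG_SPACE(sizeof(prio))] = { 0 };
 *	struct msghdr mh = { .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	cm->cmsg_level = SOL_DCCP;
 *	cm->cmsg_type  = DCCP_SCM_PRIORITY;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(prio));
 *	memcpy(CMSG_DATA(cm), &prio, sizeof(prio));
 *	// point mh.msg_iov/msg_iovlen at the payload, then sendmsg(fd, &mh, 0)
 */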
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	trace_dccp_probe(sk, len);

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_discard;
	}

	if (sk->sk_state == DCCP_CLOSED) {
		rc = -ENOTCONN;
		goto out_discard;
	}

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits to release further packets into the
	 * network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);
int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		 int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			fallthrough;
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo, NULL);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		struct dccp_sock *dp = dccp_sk(sk);

		dp->dccps_role = DCCP_ROLE_LISTEN;

		/* do not start to listen if feature negotiation setup fails */
		if (dccp_feat_finalise_settings(dp)) {
			err = -EPROTO;
			goto out;
		}

		err = inet_csk_listen_start(sk);
		if (err)
			goto out;
	}
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		fallthrough;
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		fallthrough;
	default:
		dccp_set_state(sk, next_state);
	}
}
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	/* If socket has been already reset kill it. */
	if (sk->sk_state == DCCP_CLOSED)
		goto adjudge_to_death;

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	this_cpu_inc(dccp_orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);
static inline int __init dccp_mib_init(void)
{
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics);
}
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
static int __init dccp_init(void)
{
	unsigned long goal;
	unsigned long nr_pages = totalram_pages();
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));
	rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_hashinfo2;
	dccp_hashinfo.bind2_bucket_cachep =
		kmem_cache_create("dccp_bind2_bucket",
				  sizeof(struct inet_bind2_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	if (!dccp_hashinfo.bind2_bucket_cachep)
		goto out_free_bind_bucket_cachep;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (nr_pages >= (128 * 1024))
		goal = nr_pages >> (21 - PAGE_SHIFT);
	else
		goal = nr_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind2_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	dccp_hashinfo.bhash2 = (struct inet_bind_hashbucket *)
		__get_free_pages(GFP_ATOMIC | __GFP_NOWARN, bhash_order);

	if (!dccp_hashinfo.bhash2) {
		DCCP_CRIT("Failed to allocate DCCP bind2 hash table");
		goto out_free_dccp_bhash;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
		spin_lock_init(&dccp_hashinfo.bhash2[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash2[i].chain);
	}

	dccp_hashinfo.pernet = false;

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash2;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash2:
	free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind2_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind2_bucket_cachep);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_hashinfo2:
	inet_hashinfo2_free_mod(&dccp_hashinfo);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.bhash2 = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	dccp_hashinfo.bind2_bucket_cachep = NULL;
	return rc;
}
static void __exit dccp_fini(void)
{
	int bhash_order = get_order(dccp_hashinfo.bhash_size *
				    sizeof(struct inet_bind_hashbucket));

	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	inet_hashinfo2_free_mod(&dccp_hashinfo);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");