/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_inuse_add(struct net *net, int val);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and that the current process has it in the
 * user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and that the current process has it in all
 * user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and that the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
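
/*
 * Illustrative sketch, not part of the original file: a protocol option
 * handler might gate a privileged setting on the helpers above. The
 * handler name below is hypothetical; the capability test itself mirrors
 * real callers such as the SO_MARK arm of sock_setsockopt().
 *
 *	static int hypothetical_set_priv_opt(struct sock *sk, int val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		sk->sk_priority = val;
 *		return 0;
 *	}
 */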

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF"     ,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"      ,	x "AF_XDP"      , \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while it still
	 * has rmem allocations, due to the last swapfile being deactivated,
	 * but there is a risk that the socket becomes unusable by exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
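
/*
 * Illustrative sketch, not part of the original file: how a kernel socket
 * carrying swap traffic (e.g. swap-over-NFS) might toggle SOCK_MEMALLOC
 * around its lifetime. Both function names are hypothetical placeholders;
 * only the two exported helpers above are real.
 *
 *	static void hypothetical_swap_transport_start(struct sock *sk)
 *	{
 *		sk_set_memalloc(sk);
 *	}
 *
 *	static void hypothetical_swap_transport_stop(struct sock *sk)
 *	{
 *		sk_clear_memalloc(sk);
 *	}
 *
 * Between the two calls the socket may dip into emergency reserves; the
 * stop path also reclaims rmem via sk_mem_reclaim(), as described above.
 */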

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
	struct __kernel_sock_timeval tv;
	int size;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	if (old_timeval) {
		struct __kernel_old_timeval old_tv;
		old_tv.tv_sec = tv.tv_sec;
		old_tv.tv_usec = tv.tv_usec;
		*(struct __kernel_old_timeval *)optval = old_tv;
		size = sizeof(old_tv);
	} else {
		*(struct __kernel_sock_timeval *)optval = tv;
		size = sizeof(tv);
	}

	return size;
}

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_user(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv.tv_sec = tv32.tv_sec;
		tv.tv_usec = tv32.tv_usec;
	} else if (old_timeval) {
		struct __kernel_old_timeval old_tv;

		if (optlen < sizeof(old_tv))
			return -EINVAL;
		if (copy_from_user(&old_tv, optval, sizeof(old_tv)))
			return -EFAULT;
		tv.tv_sec = old_tv.tv_sec;
		tv.tv_usec = old_tv.tv_usec;
	} else {
		if (optlen < sizeof(tv))
			return -EINVAL;
		if (copy_from_user(&tv, optval, sizeof(tv)))
			return -EFAULT;
	}
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
	return 0;
}
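
/*
 * Illustrative sketch, not part of the original file: the userspace view
 * of the path above. A classic struct timeval arrives here as the "old"
 * layout; tv_usec outside [0, USEC_PER_SEC) is rejected with -EDOM, and
 * a negative tv_sec is clamped to an immediate timeout with a
 * ratelimited warning.
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */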

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from the rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);
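
/*
 * Illustrative sketch, not part of the original file: the usual shape of
 * a protocol rx handler calling the helper above (compare the DCCP input
 * path). nested=1 selects bh_lock_sock_nested(); trim_cap bounds how far
 * a socket filter may trim the skb; refcounted=true drops the caller's
 * socket reference on return. The value 4 is a placeholder header length.
 *
 *	return __sk_receive_skb(sk, skb, 1, 4, true);
 */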

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_pending_confirm = 0;
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	sk->sk_bound_dev_if = ifindex;
	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	ret = sock_setbindtodevice_locked(sk, index);
	release_sock(sk);

out:
#endif

	return ret;
}
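
/*
 * Illustrative sketch, not part of the original file: binding a socket to
 * a device from userspace. An empty name unbinds, as described in the
 * comment inside sock_setbindtodevice() above.
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 */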

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock_txtime sk_txtime;
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 * Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 * (An illustrative userspace sketch follows this function.)
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_rcvbuf;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMP_NEW:
	case SO_TIMESTAMPNS_OLD:
	case SO_TIMESTAMPNS_NEW:
		if (valbool) {
			if (optname == SO_TIMESTAMP_NEW || optname == SO_TIMESTAMPNS_NEW)
				sock_set_flag(sk, SOCK_TSTAMP_NEW);
			else
				sock_reset_flag(sk, SOCK_TSTAMP_NEW);

			if (optname == SO_TIMESTAMP_OLD || optname == SO_TIMESTAMP_NEW)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			sock_reset_flag(sk, SOCK_TSTAMP_NEW);
		}
		break;

	case SO_TIMESTAMPING_NEW:
		sock_set_flag(sk, SOCK_TSTAMP_NEW);
		/* fall through */
	case SO_TIMESTAMPING_OLD:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else {
			if (optname == SO_TIMESTAMPING_NEW)
				sock_reset_flag(sk, SOCK_TSTAMP_NEW);

			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		}
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		if (sock->ops->set_rcvlowat)
			ret = sock->ops->set_rcvlowat(sk, val);
		else
			sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (val != sk->sk_mark) {
			sk->sk_mark = val;
			sk_dst_reset(sk);
		}
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
	{
		unsigned long ulval = (val == ~0U) ? ~0UL : val;

		if (sizeof(ulval) != sizeof(val) &&
		    optlen >= sizeof(ulval) &&
		    get_user(ulval, (unsigned long __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		if (ulval != ~0UL)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		sk->sk_max_pacing_rate = ulval;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
		break;
	}
	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (!((sk->sk_type == SOCK_STREAM &&
			       sk->sk_protocol == IPPROTO_TCP) ||
			      (sk->sk_type == SOCK_DGRAM &&
			       sk->sk_protocol == IPPROTO_UDP)))
				ret = -ENOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -ENOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;

	case SO_TXTIME:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
		} else if (copy_from_user(&sk_txtime, optval,
					  sizeof(struct sock_txtime))) {
			ret = -EFAULT;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
		} else {
			sock_valbool_flag(sk, SOCK_TXTIME, true);
			sk->sk_clockid = sk_txtime.clockid;
			sk->sk_txtime_deadline_mode =
				!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
			sk->sk_txtime_report_errors =
				!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
		}
		break;

	case SO_BINDTOIFINDEX:
		ret = sock_setbindtodevice_locked(sk, val);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
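
/*
 * Illustrative sketch, not part of the original file: the SO_RCVBUF
 * doubling documented inside sock_setsockopt(), as seen from userspace.
 * The kernel stores twice the requested value to cover struct sk_buff
 * overhead, so reading the option back typically returns 2 * val
 * (subject to the sysctl_rmem_max and SOCK_MIN_RCVBUF clamps).
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *
 * after which out is typically 131072.
 */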


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(gid_t __user *dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++)
		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
			return -EFAULT;

	return 0;
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		u64 val64;
		unsigned long ulval;
		struct linger ling;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct __kernel_sock_timeval stm;
		struct sock_txtime txtime;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_TSTAMP_NEW) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMP_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPNS_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPING_OLD:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERGROUPS:
	{
		int ret, n;

		if (!sk->sk_peer_cred)
			return -ENODATA;

		n = sk->sk_peer_cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			return put_user(len, optlen) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user((gid_t __user *)optval,
				     sk->sk_peer_cred->group_info);
		if (ret)
			return ret;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
		if (lv < 0)
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
			lv = sizeof(v.ulval);
			v.ulval = sk->sk_max_pacing_rate;
		} else {
			/* 32bit version */
			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
		}
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	case SO_MEMINFO:
	{
		u32 meminfo[SK_MEMINFO_VARS];

		if (get_user(len, optlen))
			return -EFAULT;

		sk_get_meminfo(sk, meminfo);

		len = min_t(unsigned int, len, sizeof(meminfo));
		if (copy_to_user(optval, &meminfo, len))
			return -EFAULT;

		goto lenout;
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_INCOMING_NAPI_ID:
		v.val = READ_ONCE(sk->sk_napi_id);

		/* aggregate non-NAPI IDs down to 0 */
		if (v.val < MIN_NAPI_ID)
			v.val = 0;

		break;
#endif

	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;

	case SO_ZEROCOPY:
		v.val = sock_flag(sk, SOCK_ZEROCOPY);
		break;

	case SO_TXTIME:
		lv = sizeof(v.txtime);
		v.txtime.clockid = sk->sk_clockid;
		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
				  SOF_TXTIME_DEADLINE_MODE : 0;
		v.txtime.flags |= sk->sk_txtime_report_errors ?
				  SOF_TXTIME_REPORT_ERRORS : 0;
		break;

	case SO_BINDTOIFINDEX:
		v.val = sk->sk_bound_dev_if;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
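
/*
 * Illustrative sketch, not part of the original file: fetching peer
 * credentials on a unix socket, which lands in the SO_PEERCRED arm
 * above and is filled in by cred_to_ucred().
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len);
 */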

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	if (sk->sk_kern_sock)
		sock_lock_init_class_and_name(
			sk,
			af_family_kern_slock_key_strings[sk->sk_family],
			af_family_kern_slock_keys + sk->sk_family,
			af_family_kern_key_strings[sk->sk_family],
			af_family_kern_keys + sk->sk_family);
	else
		sock_lock_init_class_and_name(
			sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 * sk_alloc - All socket objects are allocated here
 * @net: the applicable net namespace
 * @family: protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: struct proto associated with this new sock instance
 * @kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sk->sk_kern_sock = kern;
		sock_lock_init(sk);
		sk->sk_net_refcnt = kern ? 0 : 1;
		if (likely(sk->sk_net_refcnt)) {
			get_net(net);
			sock_inuse_add(net, 1);
		}

		sock_net_set(sk, net);
		refcount_set(&sk->sk_wmem_alloc, 1);

		mem_cgroup_sk_alloc(sk);
		cgroup_sk_alloc(&sk->sk_cgrp_data);
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
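
/*
 * Illustrative sketch, not part of the original file: the call a
 * protocol's ->create() handler typically makes (compare inet_create()).
 * "my_proto" is a hypothetical struct proto.
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 */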

/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{
	struct sock *sk = container_of(head, struct sock, sk_rcu);
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       refcount_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_destruct(struct sock *sk)
{
	if (sock_flag(sk, SOCK_RCU_FREE))
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}

static void __sk_free(struct sock *sk)
{
	if (likely(sk->sk_net_refcnt))
		sock_inuse_add(sock_net(sk), -1);

	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
		sock_diag_broadcast_destroy(sk);
	else
		sk_destruct(sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

static void sk_init_common(struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
				   af_rlock_keys + sk->sk_family,
				   af_family_rlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
				   af_wlock_keys + sk->sk_family,
				   af_family_wlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
				   af_elock_keys + sk->sk_family,
				   af_family_elock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);
}

e56c57d0
ED
1781/**
1782 * sk_clone_lock - clone a socket, and lock its clone
1783 * @sk: the socket to clone
1784 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1785 *
1786 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1787 */
1788struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
87d11ceb 1789{
8fd1d178 1790 struct sock *newsk;
278571ba 1791 bool is_charged = true;
87d11ceb 1792
8fd1d178 1793 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
87d11ceb
ACM
1794 if (newsk != NULL) {
1795 struct sk_filter *filter;
1796
892c141e 1797 sock_copy(newsk, sk);
87d11ceb 1798
9d538fa6
CP
1799 newsk->sk_prot_creator = sk->sk_prot;
1800
87d11ceb 1801 /* SANITY */
8a681736
SV
1802 if (likely(newsk->sk_net_refcnt))
1803 get_net(sock_net(newsk));
87d11ceb
ACM
1804 sk_node_init(&newsk->sk_node);
1805 sock_lock_init(newsk);
1806 bh_lock_sock(newsk);
fa438ccf 1807 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
8eae939f 1808 newsk->sk_backlog.len = 0;
87d11ceb
ACM
1809
1810 atomic_set(&newsk->sk_rmem_alloc, 0);
2b85a34e
ED
1811 /*
1812 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1813 */
14afee4b 1814 refcount_set(&newsk->sk_wmem_alloc, 1);
87d11ceb 1815 atomic_set(&newsk->sk_omem_alloc, 0);
581319c5 1816 sk_init_common(newsk);
87d11ceb
ACM
1817
1818 newsk->sk_dst_cache = NULL;
9b8805a3 1819 newsk->sk_dst_pending_confirm = 0;
87d11ceb
ACM
1820 newsk->sk_wmem_queued = 0;
1821 newsk->sk_forward_alloc = 0;
9caad864 1822 atomic_set(&newsk->sk_drops, 0);
87d11ceb 1823 newsk->sk_send_head = NULL;
87d11ceb 1824 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
52267790 1825 atomic_set(&newsk->sk_zckey, 0);
87d11ceb
ACM
1826
1827 sock_reset_flag(newsk, SOCK_DONE);
edbe69ef 1828 mem_cgroup_sk_alloc(newsk);
c0576e39 1829 cgroup_sk_alloc(&newsk->sk_cgrp_data);
87d11ceb 1830
eefca20e
ED
1831 rcu_read_lock();
1832 filter = rcu_dereference(sk->sk_filter);
87d11ceb 1833 if (filter != NULL)
278571ba
AS
1834 /* though it's an empty new sock, the charging may fail
1835 * if sysctl_optmem_max was changed between creation of
1836 * original socket and cloning
1837 */
1838 is_charged = sk_filter_charge(newsk, filter);
eefca20e
ED
1839 RCU_INIT_POINTER(newsk->sk_filter, filter);
1840 rcu_read_unlock();
87d11ceb 1841
d188ba86 1842 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
a97e50cc
DB
1843 /* We need to make sure that we don't uncharge the new
1844 * socket if we couldn't charge it in the first place
1845 * as otherwise we uncharge the parent's filter.
1846 */
1847 if (!is_charged)
1848 RCU_INIT_POINTER(newsk->sk_filter, NULL);
94352d45 1849 sk_free_unlock_clone(newsk);
87d11ceb
ACM
1850 newsk = NULL;
1851 goto out;
1852 }
fa463497 1853 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
87d11ceb
ACM
1854
1855 newsk->sk_err = 0;
e551c32d 1856 newsk->sk_err_soft = 0;
87d11ceb 1857 newsk->sk_priority = 0;
2c8c56e1 1858 newsk->sk_incoming_cpu = raw_smp_processor_id();
648845ab
TZ
1859 if (likely(newsk->sk_net_refcnt))
1860 sock_inuse_add(sock_net(newsk), 1);
d979a39d 1861
4dc6dc71
ED
1862 /*
1863 * Before updating sk_refcnt, we must commit prior changes to memory
1864 * (Documentation/RCU/rculist_nulls.txt for details)
1865 */
1866 smp_wmb();
41c6d650 1867 refcount_set(&newsk->sk_refcnt, 2);
87d11ceb
ACM
1868
1869 /*
1870 * Increment the counter in the same struct proto as the master
1871 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, which
1872 * is the same as sk->sk_prot->socks, as this field was copied
1873 * with memcpy).
1874 *
1875 * This _changes_ the previous behaviour, where
1876 * tcp_create_openreq_child always incremented the
1877 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1878 * to be taken into account in all callers. -acme
1879 */
1880 sk_refcnt_debug_inc(newsk);
972692e0 1881 sk_set_socket(newsk, NULL);
c2f26e8f 1882 RCU_INIT_POINTER(newsk->sk_wq, NULL);
87d11ceb
ACM
1883
1884 if (newsk->sk_prot->sockets_allocated)
180d8cd9 1885 sk_sockets_allocated_inc(newsk);
704da560 1886
080a270f
HFS
1887 if (sock_needs_netstamp(sk) &&
1888 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
704da560 1889 net_enable_timestamp();
87d11ceb
ACM
1890 }
1891out:
1892 return newsk;
1893}
e56c57d0 1894EXPORT_SYMBOL_GPL(sk_clone_lock);
87d11ceb 1895
94352d45
ACM
1896void sk_free_unlock_clone(struct sock *sk)
1897{
1898 /* It is still a raw copy of the parent, so invalidate
1899 * the destructor and do a plain sk_free() */
1900 sk->sk_destruct = NULL;
1901 bh_unlock_sock(sk);
1902 sk_free(sk);
1903}
1904EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
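For illustration, a minimal sketch of the calling convention implied by sk_clone_lock() and sk_free_unlock_clone(): the clone comes back locked, so every exit path must either unlock it or hand it back. my_proto_clone() and my_proto_init_clone() are hypothetical, not kernel APIs.

static struct sock *my_proto_clone(const struct sock *sk)
{
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (!newsk)
		return NULL;

	if (my_proto_init_clone(newsk)) {	/* hypothetical setup hook */
		/* newsk is still a raw copy: unlock and free in one step */
		sk_free_unlock_clone(newsk);
		return NULL;
	}
	bh_unlock_sock(newsk);			/* success: caller owns newsk */
	return newsk;
}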
1905
9958089a
AK
1906void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1907{
d6a4e26a
ED
1908 u32 max_segs = 1;
1909
6bd4f355 1910 sk_dst_set(sk, dst);
0a6b2a1d 1911 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
9958089a 1912 if (sk->sk_route_caps & NETIF_F_GSO)
4fcd6b99 1913 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
a465419b 1914 sk->sk_route_caps &= ~sk->sk_route_nocaps;
9958089a 1915 if (sk_can_gso(sk)) {
f70f250a 1916 if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
9958089a 1917 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
82cc1a7a 1918 } else {
9958089a 1919 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
82cc1a7a 1920 sk->sk_gso_max_size = dst->dev->gso_max_size;
d6a4e26a 1921 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
82cc1a7a 1922 }
9958089a 1923 }
d6a4e26a 1924 sk->sk_gso_max_segs = max_segs;
9958089a
AK
1925}
1926EXPORT_SYMBOL_GPL(sk_setup_caps);
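A short usage sketch, modelled on what connect paths such as tcp_v4_connect() do once a route is resolved; my_proto_connect_finish() is hypothetical, and the dst is assumed to be already referenced by the caller.

static void my_proto_connect_finish(struct sock *sk, struct dst_entry *dst)
{
	sk_setup_caps(sk, dst);	/* caches dst, derives sk_route_caps */
	/* sk->sk_gso_max_size and sk->sk_gso_max_segs now track dst->dev */
}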
1927
1da177e4
LT
1928/*
1929 * Simple resource managers for sockets.
1930 */
1931
1932
4ec93edb
YH
1933/*
1934 * Write buffer destructor automatically called from kfree_skb.
1da177e4
LT
1935 */
1936void sock_wfree(struct sk_buff *skb)
1937{
1938 struct sock *sk = skb->sk;
d99927f4 1939 unsigned int len = skb->truesize;
1da177e4 1940
d99927f4
ED
1941 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1942 /*
1943 * Keep a reference on sk_wmem_alloc; it will be released
1944 * after the sk_write_space() call
1945 */
14afee4b 1946 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
1da177e4 1947 sk->sk_write_space(sk);
d99927f4
ED
1948 len = 1;
1949 }
2b85a34e 1950 /*
d99927f4
ED
1951 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1952 * could not do because of in-flight packets
2b85a34e 1953 */
14afee4b 1954 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2b85a34e 1955 __sk_free(sk);
1da177e4 1956}
2a91525c 1957EXPORT_SYMBOL(sock_wfree);
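The write-side accounting relies on sk_wmem_alloc being biased by one: it starts at 1 when the sock is allocated, each queued skb adds its truesize, and sk_free() drops the final unit. A minimal sketch of that lifecycle; my_proto_xmit() is a hypothetical caller.

static int my_proto_xmit(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);	/* sk_wmem_alloc += skb->truesize */
	return dev_queue_xmit(skb);	/* kfree_skb() runs sock_wfree() later */
}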
1da177e4 1958
1d2077ac
ED
1959/* This variant of sock_wfree() is used by TCP,
1960 * since it sets SOCK_USE_WRITE_QUEUE.
1961 */
1962void __sock_wfree(struct sk_buff *skb)
1963{
1964 struct sock *sk = skb->sk;
1965
14afee4b 1966 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
1d2077ac
ED
1967 __sk_free(sk);
1968}
1969
9e17f8a4
ED
1970void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1971{
1972 skb_orphan(skb);
1973 skb->sk = sk;
1974#ifdef CONFIG_INET
1975 if (unlikely(!sk_fullsock(sk))) {
1976 skb->destructor = sock_edemux;
1977 sock_hold(sk);
1978 return;
1979 }
1980#endif
1981 skb->destructor = sock_wfree;
1982 skb_set_hash_from_sk(skb, sk);
1983 /*
1984 * We used to take a refcount on sk, but the following operation
1985 * is enough to guarantee sk_free() won't free this sock until
1986 * all in-flight packets are completed
1987 */
14afee4b 1988 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
9e17f8a4
ED
1989}
1990EXPORT_SYMBOL(skb_set_owner_w);
1991
1d2077ac
ED
1992/* This helper is used by netem, as it can hold packets in its
1993 * delay queue. We want to allow the owner socket to send more
1994 * packets, as if they were already TX completed by a typical driver.
1995 * But we also want to keep skb->sk set because some packet schedulers
f6ba8d33 1996 * rely on it (sch_fq for example).
1d2077ac 1997 */
f2f872f9
ED
1998void skb_orphan_partial(struct sk_buff *skb)
1999{
f6ba8d33 2000 if (skb_is_tcp_pure_ack(skb))
1d2077ac
ED
2001 return;
2002
f2f872f9
ED
2003 if (skb->destructor == sock_wfree
2004#ifdef CONFIG_INET
2005 || skb->destructor == tcp_wfree
2006#endif
2007 ) {
f6ba8d33
ED
2008 struct sock *sk = skb->sk;
2009
41c6d650 2010 if (refcount_inc_not_zero(&sk->sk_refcnt)) {
14afee4b 2011 WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
f6ba8d33
ED
2012 skb->destructor = sock_efree;
2013 }
f2f872f9
ED
2014 } else {
2015 skb_orphan(skb);
2016 }
2017}
2018EXPORT_SYMBOL(skb_orphan_partial);
2019
4ec93edb
YH
2020/*
2021 * Read buffer destructor automatically called from kfree_skb.
1da177e4
LT
2022 */
2023void sock_rfree(struct sk_buff *skb)
2024{
2025 struct sock *sk = skb->sk;
d361fd59 2026 unsigned int len = skb->truesize;
1da177e4 2027
d361fd59
ED
2028 atomic_sub(len, &sk->sk_rmem_alloc);
2029 sk_mem_uncharge(sk, len);
1da177e4 2030}
2a91525c 2031EXPORT_SYMBOL(sock_rfree);
1da177e4 2032
7768eed8
OH
2033/*
2034 * Buffer destructor for skbs that are not used directly in read or write
2035 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2036 */
62bccb8c
AD
2037void sock_efree(struct sk_buff *skb)
2038{
2039 sock_put(skb->sk);
2040}
2041EXPORT_SYMBOL(sock_efree);
2042
976d0201 2043kuid_t sock_i_uid(struct sock *sk)
1da177e4 2044{
976d0201 2045 kuid_t uid;
1da177e4 2046
f064af1e 2047 read_lock_bh(&sk->sk_callback_lock);
976d0201 2048 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
f064af1e 2049 read_unlock_bh(&sk->sk_callback_lock);
1da177e4
LT
2050 return uid;
2051}
2a91525c 2052EXPORT_SYMBOL(sock_i_uid);
1da177e4
LT
2053
2054unsigned long sock_i_ino(struct sock *sk)
2055{
2056 unsigned long ino;
2057
f064af1e 2058 read_lock_bh(&sk->sk_callback_lock);
1da177e4 2059 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
f064af1e 2060 read_unlock_bh(&sk->sk_callback_lock);
1da177e4
LT
2061 return ino;
2062}
2a91525c 2063EXPORT_SYMBOL(sock_i_ino);
1da177e4
LT
2064
2065/*
2066 * Allocate a skb from the socket's send buffer.
2067 */
86a76caf 2068struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
dd0fc66f 2069 gfp_t priority)
1da177e4 2070{
14afee4b 2071 if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
2a91525c 2072 struct sk_buff *skb = alloc_skb(size, priority);
1da177e4
LT
2073 if (skb) {
2074 skb_set_owner_w(skb, sk);
2075 return skb;
2076 }
2077 }
2078 return NULL;
2079}
2a91525c 2080EXPORT_SYMBOL(sock_wmalloc);
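A hedged usage sketch: sock_wmalloc() charges against sk_sndbuf unless @force is set, so callers that must make progress (protocol control packets, for instance) pass force = 1, much as TCP does for pure ACKs. my_proto_alloc_ctrl() is hypothetical.

static struct sk_buff *my_proto_alloc_ctrl(struct sock *sk, unsigned int hdr_len)
{
	/* force = 1: control packets must go out even over sndbuf */
	struct sk_buff *skb = sock_wmalloc(sk, hdr_len, 1, GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, hdr_len);	/* leave room to push headers */
	return skb;
}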
1da177e4 2081
98ba0bd5
WB
2082static void sock_ofree(struct sk_buff *skb)
2083{
2084 struct sock *sk = skb->sk;
2085
2086 atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2087}
2088
2089struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2090 gfp_t priority)
2091{
2092 struct sk_buff *skb;
2093
2094 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2095 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2096 sysctl_optmem_max)
2097 return NULL;
2098
2099 skb = alloc_skb(size, priority);
2100 if (!skb)
2101 return NULL;
2102
2103 atomic_add(skb->truesize, &sk->sk_omem_alloc);
2104 skb->sk = sk;
2105 skb->destructor = sock_ofree;
2106 return skb;
2107}
2108
4ec93edb 2109/*
1da177e4 2110 * Allocate a memory block from the socket's option memory buffer.
4ec93edb 2111 */
dd0fc66f 2112void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1da177e4 2113{
95c96174 2114 if ((unsigned int)size <= sysctl_optmem_max &&
1da177e4
LT
2115 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2116 void *mem;
2117 /* First do the add, to avoid the race if kmalloc
4ec93edb 2118 * might sleep.
1da177e4
LT
2119 */
2120 atomic_add(size, &sk->sk_omem_alloc);
2121 mem = kmalloc(size, priority);
2122 if (mem)
2123 return mem;
2124 atomic_sub(size, &sk->sk_omem_alloc);
2125 }
2126 return NULL;
2127}
2a91525c 2128EXPORT_SYMBOL(sock_kmalloc);
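sock_kmalloc() and sock_kfree_s()/sock_kzfree_s() must stay in matched pairs with the same size, since sk_omem_alloc is adjusted by the caller-supplied length on both sides. A minimal sketch, assuming a hypothetical per-socket secret blob copied in from userspace:

static int my_proto_set_secret(struct sock *sk, const void __user *src, int len)
{
	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);

	if (!buf)
		return -ENOBUFS;
	if (copy_from_user(buf, src, len)) {
		sock_kzfree_s(sk, buf, len);	/* zero the key material */
		return -EFAULT;
	}
	/* ... install buf; free later with sock_kzfree_s(sk, buf, len) ... */
	return 0;
}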
1da177e4 2129
79e88659
DB
2130/* Free an option memory block. Note, we actually want the inline
2131 * here as this allows gcc to detect the nullify and fold away the
2132 * condition entirely.
1da177e4 2133 */
79e88659
DB
2134static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2135 const bool nullify)
1da177e4 2136{
e53da5fb
DM
2137 if (WARN_ON_ONCE(!mem))
2138 return;
79e88659
DB
2139 if (nullify)
2140 kzfree(mem);
2141 else
2142 kfree(mem);
1da177e4
LT
2143 atomic_sub(size, &sk->sk_omem_alloc);
2144}
79e88659
DB
2145
2146void sock_kfree_s(struct sock *sk, void *mem, int size)
2147{
2148 __sock_kfree_s(sk, mem, size, false);
2149}
2a91525c 2150EXPORT_SYMBOL(sock_kfree_s);
1da177e4 2151
79e88659
DB
2152void sock_kzfree_s(struct sock *sk, void *mem, int size)
2153{
2154 __sock_kfree_s(sk, mem, size, true);
2155}
2156EXPORT_SYMBOL(sock_kzfree_s);
2157
1da177e4
LT
2158/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2159 I think these locks should be removed for datagram sockets.
2160 */
2a91525c 2161static long sock_wait_for_wmem(struct sock *sk, long timeo)
1da177e4
LT
2162{
2163 DEFINE_WAIT(wait);
2164
9cd3e072 2165 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1da177e4
LT
2166 for (;;) {
2167 if (!timeo)
2168 break;
2169 if (signal_pending(current))
2170 break;
2171 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
aa395145 2172 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
14afee4b 2173 if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1da177e4
LT
2174 break;
2175 if (sk->sk_shutdown & SEND_SHUTDOWN)
2176 break;
2177 if (sk->sk_err)
2178 break;
2179 timeo = schedule_timeout(timeo);
2180 }
aa395145 2181 finish_wait(sk_sleep(sk), &wait);
1da177e4
LT
2182 return timeo;
2183}
2184
2185
2186/*
2187 * Generic send/receive buffer handlers
2188 */
2189
4cc7f68d
HX
2190struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2191 unsigned long data_len, int noblock,
28d64271 2192 int *errcode, int max_page_order)
1da177e4 2193{
2e4e4410 2194 struct sk_buff *skb;
1da177e4
LT
2195 long timeo;
2196 int err;
2197
1da177e4 2198 timeo = sock_sndtimeo(sk, noblock);
2e4e4410 2199 for (;;) {
1da177e4
LT
2200 err = sock_error(sk);
2201 if (err != 0)
2202 goto failure;
2203
2204 err = -EPIPE;
2205 if (sk->sk_shutdown & SEND_SHUTDOWN)
2206 goto failure;
2207
2e4e4410
ED
2208 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
2209 break;
28d64271 2210
9cd3e072 2211 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2e4e4410
ED
2212 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2213 err = -EAGAIN;
2214 if (!timeo)
1da177e4 2215 goto failure;
2e4e4410
ED
2216 if (signal_pending(current))
2217 goto interrupted;
2218 timeo = sock_wait_for_wmem(sk, timeo);
1da177e4 2219 }
2e4e4410
ED
2220 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2221 errcode, sk->sk_allocation);
2222 if (skb)
2223 skb_set_owner_w(skb, sk);
1da177e4
LT
2224 return skb;
2225
2226interrupted:
2227 err = sock_intr_errno(timeo);
2228failure:
2229 *errcode = err;
2230 return NULL;
2231}
4cc7f68d 2232EXPORT_SYMBOL(sock_alloc_send_pskb);
1da177e4 2233
4ec93edb 2234struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1da177e4
LT
2235 int noblock, int *errcode)
2236{
28d64271 2237 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1da177e4 2238}
2a91525c 2239EXPORT_SYMBOL(sock_alloc_send_skb);
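A typical sendmsg-side pattern, sketched under the usual conventions: noblock comes from MSG_DONTWAIT and the error travels through *errcode, so the caller only tests the skb pointer. my_proto_sendmsg() is hypothetical; the transmit handoff is elided.

static int my_proto_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int noblock = msg->msg_flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, len + MAX_HEADER, noblock, &err);
	if (!skb)
		return err;		/* -EAGAIN, -EPIPE, -EINTR, ... */

	skb_reserve(skb, MAX_HEADER);	/* already owned via skb_set_owner_w */
	err = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}
	/* ... hand skb to the transmit path ... */
	return len;
}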
1da177e4 2240
39771b12
WB
2241int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2242 struct sockcm_cookie *sockc)
2243{
3dd17e63
SHY
2244 u32 tsflags;
2245
39771b12
WB
2246 switch (cmsg->cmsg_type) {
2247 case SO_MARK:
2248 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2249 return -EPERM;
2250 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2251 return -EINVAL;
2252 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2253 break;
7f1bc6e9 2254 case SO_TIMESTAMPING_OLD:
3dd17e63
SHY
2255 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2256 return -EINVAL;
2257
2258 tsflags = *(u32 *)CMSG_DATA(cmsg);
2259 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2260 return -EINVAL;
2261
2262 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2263 sockc->tsflags |= tsflags;
2264 break;
80b14dee
RC
2265 case SCM_TXTIME:
2266 if (!sock_flag(sk, SOCK_TXTIME))
2267 return -EINVAL;
2268 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2269 return -EINVAL;
2270 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2271 break;
779f1ede
SHY
2272 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2273 case SCM_RIGHTS:
2274 case SCM_CREDENTIALS:
2275 break;
39771b12
WB
2276 default:
2277 return -EINVAL;
2278 }
2279 return 0;
2280}
2281EXPORT_SYMBOL(__sock_cmsg_send);
2282
f28ea365
EJ
2283int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2284 struct sockcm_cookie *sockc)
2285{
2286 struct cmsghdr *cmsg;
39771b12 2287 int ret;
f28ea365
EJ
2288
2289 for_each_cmsghdr(cmsg, msg) {
2290 if (!CMSG_OK(msg, cmsg))
2291 return -EINVAL;
2292 if (cmsg->cmsg_level != SOL_SOCKET)
2293 continue;
39771b12
WB
2294 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2295 if (ret)
2296 return ret;
f28ea365
EJ
2297 }
2298 return 0;
2299}
2300EXPORT_SYMBOL(sock_cmsg_send);
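Callers seed the cookie with the socket defaults and then let the cmsgs override per message, which is the pattern udp_sendmsg() and friends follow. A minimal sketch; my_proto_get_cookie() is hypothetical.

static int my_proto_get_cookie(struct sock *sk, struct msghdr *msg,
			       struct sockcm_cookie *sockc)
{
	int err;

	sockc->tsflags = sk->sk_tsflags;	/* start from socket defaults */
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, sockc);
		if (unlikely(err))
			return err;
	}
	/* sockc->mark / tsflags / transmit_time now reflect the cmsgs */
	return 0;
}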
2301
06044751
ED
2302static void sk_enter_memory_pressure(struct sock *sk)
2303{
2304 if (!sk->sk_prot->enter_memory_pressure)
2305 return;
2306
2307 sk->sk_prot->enter_memory_pressure(sk);
2308}
2309
2310static void sk_leave_memory_pressure(struct sock *sk)
2311{
2312 if (sk->sk_prot->leave_memory_pressure) {
2313 sk->sk_prot->leave_memory_pressure(sk);
2314 } else {
2315 unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2316
2317 if (memory_pressure && *memory_pressure)
2318 *memory_pressure = 0;
2319 }
2320}
2321
5640f768
ED
2322/* On 32bit arches, an skb frag is limited to 2^15 */
2323#define SKB_FRAG_PAGE_ORDER get_order(32768)
2324
400dfd3a
ED
2325/**
2326 * skb_page_frag_refill - check that a page_frag contains enough room
2327 * @sz: minimum size of the fragment we want to get
2328 * @pfrag: pointer to page_frag
82d5e2b8 2329 * @gfp: priority for memory allocation
400dfd3a
ED
2330 *
2331 * Note: While this allocator tries to use high order pages, there is
2332 * no guarantee that allocations succeed. Therefore, @sz MUST be
2333 * less than or equal to PAGE_SIZE.
2334 */
d9b2938a 2335bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
5640f768 2336{
5640f768 2337 if (pfrag->page) {
fe896d18 2338 if (page_ref_count(pfrag->page) == 1) {
5640f768
ED
2339 pfrag->offset = 0;
2340 return true;
2341 }
400dfd3a 2342 if (pfrag->offset + sz <= pfrag->size)
5640f768
ED
2343 return true;
2344 put_page(pfrag->page);
2345 }
2346
d9b2938a
ED
2347 pfrag->offset = 0;
2348 if (SKB_FRAG_PAGE_ORDER) {
d0164adc
MG
2349 /* Avoid direct reclaim but allow kswapd to wake */
2350 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2351 __GFP_COMP | __GFP_NOWARN |
2352 __GFP_NORETRY,
d9b2938a 2353 SKB_FRAG_PAGE_ORDER);
5640f768 2354 if (likely(pfrag->page)) {
d9b2938a 2355 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
5640f768
ED
2356 return true;
2357 }
d9b2938a
ED
2358 }
2359 pfrag->page = alloc_page(gfp);
2360 if (likely(pfrag->page)) {
2361 pfrag->size = PAGE_SIZE;
2362 return true;
2363 }
400dfd3a
ED
2364 return false;
2365}
2366EXPORT_SYMBOL(skb_page_frag_refill);
2367
2368bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2369{
2370 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2371 return true;
2372
5640f768
ED
2373 sk_enter_memory_pressure(sk);
2374 sk_stream_moderate_sndbuf(sk);
2375 return false;
2376}
2377EXPORT_SYMBOL(sk_page_frag_refill);
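The intended consumption pattern, sketched: refill the per-socket (or per-task) page_frag, copy into it at pfrag->offset, then advance the offset by what was consumed. Assumes process context, since sk_page_frag() may hand back current->task_frag; my_proto_append() is hypothetical.

static int my_proto_append(struct sock *sk, const void *data, unsigned int len)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;		/* memory pressure was signalled */

	len = min_t(unsigned int, len, pfrag->size - pfrag->offset);
	memcpy(page_address(pfrag->page) + pfrag->offset, data, len);
	pfrag->offset += len;		/* consume the bytes we wrote */
	return len;
}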
2378
1da177e4 2379static void __lock_sock(struct sock *sk)
f39234d6
NK
2380 __releases(&sk->sk_lock.slock)
2381 __acquires(&sk->sk_lock.slock)
1da177e4
LT
2382{
2383 DEFINE_WAIT(wait);
2384
e71a4783 2385 for (;;) {
1da177e4
LT
2386 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2387 TASK_UNINTERRUPTIBLE);
2388 spin_unlock_bh(&sk->sk_lock.slock);
2389 schedule();
2390 spin_lock_bh(&sk->sk_lock.slock);
e71a4783 2391 if (!sock_owned_by_user(sk))
1da177e4
LT
2392 break;
2393 }
2394 finish_wait(&sk->sk_lock.wq, &wait);
2395}
2396
8873c064 2397void __release_sock(struct sock *sk)
f39234d6
NK
2398 __releases(&sk->sk_lock.slock)
2399 __acquires(&sk->sk_lock.slock)
1da177e4 2400{
5413d1ba 2401 struct sk_buff *skb, *next;
1da177e4 2402
5413d1ba 2403 while ((skb = sk->sk_backlog.head) != NULL) {
1da177e4 2404 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1da177e4 2405
5413d1ba 2406 spin_unlock_bh(&sk->sk_lock.slock);
1da177e4 2407
5413d1ba
ED
2408 do {
2409 next = skb->next;
e4cbb02a 2410 prefetch(next);
7fee226a 2411 WARN_ON_ONCE(skb_dst_is_noref(skb));
a8305bff 2412 skb_mark_not_on_list(skb);
c57943a1 2413 sk_backlog_rcv(sk, skb);
1da177e4 2414
5413d1ba 2415 cond_resched();
1da177e4
LT
2416
2417 skb = next;
2418 } while (skb != NULL);
2419
5413d1ba
ED
2420 spin_lock_bh(&sk->sk_lock.slock);
2421 }
8eae939f
ZY
2422
2423 /*
2424 * Doing the zeroing here guarantees we cannot loop forever
2425 * while a wild producer attempts to flood us.
2426 */
2427 sk->sk_backlog.len = 0;
1da177e4
LT
2428}
2429
d41a69f1
ED
2430void __sk_flush_backlog(struct sock *sk)
2431{
2432 spin_lock_bh(&sk->sk_lock.slock);
2433 __release_sock(sk);
2434 spin_unlock_bh(&sk->sk_lock.slock);
2435}
2436
1da177e4
LT
2437/**
2438 * sk_wait_data - wait for data to arrive at sk_receive_queue
4dc3b16b
PP
2439 * @sk: sock to wait on
2440 * @timeo: for how long
dfbafc99 2441 * @skb: last skb seen on sk_receive_queue
1da177e4
LT
2442 *
2443 * Now socket state including sk->sk_err is changed only under the lock,
2444 * hence we may omit checks after joining the wait queue.
2445 * We check the receive queue before schedule() only as an optimization;
2446 * it is very likely that release_sock() added new data.
2447 */
dfbafc99 2448int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
1da177e4 2449{
d9dc8b0f 2450 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1da177e4 2451 int rc;
1da177e4 2452
d9dc8b0f 2453 add_wait_queue(sk_sleep(sk), &wait);
9cd3e072 2454 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
d9dc8b0f 2455 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
9cd3e072 2456 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
d9dc8b0f 2457 remove_wait_queue(sk_sleep(sk), &wait);
1da177e4
LT
2458 return rc;
2459}
1da177e4
LT
2460EXPORT_SYMBOL(sk_wait_data);
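The canonical blocking-receive loop, sketched: take the socket lock, peek the queue, and let sk_wait_data() sleep until the queue tail changes or the timeout runs out. my_proto_wait_for_skb() is hypothetical; signal and error propagation are elided.

static struct sk_buff *my_proto_wait_for_skb(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	lock_sock(sk);
	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (!timeo || sk->sk_err || signal_pending(current))
			break;		/* caller turns this into an errno */
		sk_wait_data(sk, &timeo, NULL);	/* drops and retakes the lock */
	}
	release_sock(sk);
	return skb;
}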
2461
3ab224be 2462/**
f8c3bf00 2463 * __sk_mem_raise_allocated - increase memory_allocated
3ab224be
HA
2464 * @sk: socket
2465 * @size: memory size to allocate
f8c3bf00 2466 * @amt: pages to allocate
3ab224be
HA
2467 * @kind: allocation type
2468 *
f8c3bf00 2469 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
3ab224be 2470 */
f8c3bf00 2471int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
3ab224be
HA
2472{
2473 struct proto *prot = sk->sk_prot;
f8c3bf00 2474 long allocated = sk_memory_allocated_add(sk, amt);
d6f19938 2475 bool charged = true;
e805605c 2476
baac50bb 2477 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
d6f19938 2478 !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
e805605c 2479 goto suppress_allocation;
3ab224be
HA
2480
2481 /* Under limit. */
e805605c 2482 if (allocated <= sk_prot_mem_limits(sk, 0)) {
180d8cd9 2483 sk_leave_memory_pressure(sk);
3ab224be
HA
2484 return 1;
2485 }
2486
e805605c
JW
2487 /* Under pressure. */
2488 if (allocated > sk_prot_mem_limits(sk, 1))
180d8cd9 2489 sk_enter_memory_pressure(sk);
3ab224be 2490
e805605c
JW
2491 /* Over hard limit. */
2492 if (allocated > sk_prot_mem_limits(sk, 2))
3ab224be
HA
2493 goto suppress_allocation;
2494
2495 /* guarantee minimum buffer size under pressure */
2496 if (kind == SK_MEM_RECV) {
a3dcaf17 2497 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
3ab224be 2498 return 1;
180d8cd9 2499
3ab224be 2500 } else { /* SK_MEM_SEND */
a3dcaf17
ED
2501 int wmem0 = sk_get_wmem0(sk, prot);
2502
3ab224be 2503 if (sk->sk_type == SOCK_STREAM) {
a3dcaf17 2504 if (sk->sk_wmem_queued < wmem0)
3ab224be 2505 return 1;
a3dcaf17 2506 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
3ab224be 2507 return 1;
a3dcaf17 2508 }
3ab224be
HA
2509 }
2510
180d8cd9 2511 if (sk_has_memory_pressure(sk)) {
5bf325a5 2512 u64 alloc;
1748376b 2513
180d8cd9 2514 if (!sk_under_memory_pressure(sk))
1748376b 2515 return 1;
180d8cd9
GC
2516 alloc = sk_sockets_allocated_read_positive(sk);
2517 if (sk_prot_mem_limits(sk, 2) > alloc *
3ab224be
HA
2518 sk_mem_pages(sk->sk_wmem_queued +
2519 atomic_read(&sk->sk_rmem_alloc) +
2520 sk->sk_forward_alloc))
2521 return 1;
2522 }
2523
2524suppress_allocation:
2525
2526 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2527 sk_stream_moderate_sndbuf(sk);
2528
2529 /* Fail only if socket is _under_ its sndbuf.
2530 * In this case we cannot block, so we have to fail.
2531 */
2532 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2533 return 1;
2534 }
2535
d6f19938
YS
2536 if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2537 trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
3847ce32 2538
0e90b31f 2539 sk_memory_allocated_sub(sk, amt);
180d8cd9 2540
baac50bb
JW
2541 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2542 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
e805605c 2543
3ab224be
HA
2544 return 0;
2545}
f8c3bf00
PA
2546EXPORT_SYMBOL(__sk_mem_raise_allocated);
2547
2548/**
2549 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2550 * @sk: socket
2551 * @size: memory size to allocate
2552 * @kind: allocation type
2553 *
2554 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2555 * rmem allocation. This function assumes that protocols which have
2556 * memory_pressure use sk_wmem_queued as write buffer accounting.
2557 */
2558int __sk_mem_schedule(struct sock *sk, int size, int kind)
2559{
2560 int ret, amt = sk_mem_pages(size);
2561
2562 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2563 ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2564 if (!ret)
2565 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2566 return ret;
2567}
3ab224be
HA
2568EXPORT_SYMBOL(__sk_mem_schedule);
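In practice protocols use the sk_rmem_schedule()/sk_wmem_schedule() wrappers from net/sock.h, which consume sk_forward_alloc first and only fall into __sk_mem_schedule() for a fresh quantum. A receive-path sketch, assuming the socket lock is held; my_proto_queue_rcv() is hypothetical.

static int my_proto_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;	/* over the memory limits: drop */

	skb_set_owner_r(skb, sk);	/* charges sk_rmem_alloc, forward_alloc */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return 0;
}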
2569
2570/**
f8c3bf00 2571 * __sk_mem_reduce_allocated - reclaim memory_allocated
3ab224be 2572 * @sk: socket
f8c3bf00
PA
2573 * @amount: number of quanta
2574 *
2575 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
3ab224be 2576 */
f8c3bf00 2577void __sk_mem_reduce_allocated(struct sock *sk, int amount)
3ab224be 2578{
1a24e04e 2579 sk_memory_allocated_sub(sk, amount);
3ab224be 2580
baac50bb
JW
2581 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2582 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
e805605c 2583
180d8cd9
GC
2584 if (sk_under_memory_pressure(sk) &&
2585 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2586 sk_leave_memory_pressure(sk);
3ab224be 2587}
f8c3bf00
PA
2588EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2589
2590/**
2591 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2592 * @sk: socket
2593 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2594 */
2595void __sk_mem_reclaim(struct sock *sk, int amount)
2596{
2597 amount >>= SK_MEM_QUANTUM_SHIFT;
2598 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2599 __sk_mem_reduce_allocated(sk, amount);
2600}
3ab224be
HA
2601EXPORT_SYMBOL(__sk_mem_reclaim);
2602
627d2d6b 2603int sk_set_peek_off(struct sock *sk, int val)
2604{
627d2d6b 2605 sk->sk_peek_off = val;
2606 return 0;
2607}
2608EXPORT_SYMBOL_GPL(sk_set_peek_off);
3ab224be 2609
1da177e4
LT
2610/*
2611 * Set of default routines for initialising struct proto_ops when
2612 * the protocol does not support a particular function. In certain
2613 * cases where it makes no sense for a protocol to have a "do nothing"
2614 * function, some default processing is provided.
2615 */
2616
2617int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2618{
2619 return -EOPNOTSUPP;
2620}
2a91525c 2621EXPORT_SYMBOL(sock_no_bind);
1da177e4 2622
4ec93edb 2623int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1da177e4
LT
2624 int len, int flags)
2625{
2626 return -EOPNOTSUPP;
2627}
2a91525c 2628EXPORT_SYMBOL(sock_no_connect);
1da177e4
LT
2629
2630int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2631{
2632 return -EOPNOTSUPP;
2633}
2a91525c 2634EXPORT_SYMBOL(sock_no_socketpair);
1da177e4 2635
cdfbabfb
DH
2636int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2637 bool kern)
1da177e4
LT
2638{
2639 return -EOPNOTSUPP;
2640}
2a91525c 2641EXPORT_SYMBOL(sock_no_accept);
1da177e4 2642
4ec93edb 2643int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
9b2c45d4 2644 int peer)
1da177e4
LT
2645{
2646 return -EOPNOTSUPP;
2647}
2a91525c 2648EXPORT_SYMBOL(sock_no_getname);
1da177e4 2649
1da177e4
LT
2650int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2651{
2652 return -EOPNOTSUPP;
2653}
2a91525c 2654EXPORT_SYMBOL(sock_no_ioctl);
1da177e4
LT
2655
2656int sock_no_listen(struct socket *sock, int backlog)
2657{
2658 return -EOPNOTSUPP;
2659}
2a91525c 2660EXPORT_SYMBOL(sock_no_listen);
1da177e4
LT
2661
2662int sock_no_shutdown(struct socket *sock, int how)
2663{
2664 return -EOPNOTSUPP;
2665}
2a91525c 2666EXPORT_SYMBOL(sock_no_shutdown);
1da177e4
LT
2667
2668int sock_no_setsockopt(struct socket *sock, int level, int optname,
b7058842 2669 char __user *optval, unsigned int optlen)
1da177e4
LT
2670{
2671 return -EOPNOTSUPP;
2672}
2a91525c 2673EXPORT_SYMBOL(sock_no_setsockopt);
1da177e4
LT
2674
2675int sock_no_getsockopt(struct socket *sock, int level, int optname,
2676 char __user *optval, int __user *optlen)
2677{
2678 return -EOPNOTSUPP;
2679}
2a91525c 2680EXPORT_SYMBOL(sock_no_getsockopt);
1da177e4 2681
1b784140 2682int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
1da177e4
LT
2683{
2684 return -EOPNOTSUPP;
2685}
2a91525c 2686EXPORT_SYMBOL(sock_no_sendmsg);
1da177e4 2687
306b13eb
TH
2688int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
2689{
2690 return -EOPNOTSUPP;
2691}
2692EXPORT_SYMBOL(sock_no_sendmsg_locked);
2693
1b784140
YX
2694int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2695 int flags)
1da177e4
LT
2696{
2697 return -EOPNOTSUPP;
2698}
2a91525c 2699EXPORT_SYMBOL(sock_no_recvmsg);
1da177e4
LT
2700
2701int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2702{
2703 /* Mirror missing mmap method error code */
2704 return -ENODEV;
2705}
2a91525c 2706EXPORT_SYMBOL(sock_no_mmap);
1da177e4
LT
2707
2708ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2709{
2710 ssize_t res;
2711 struct msghdr msg = {.msg_flags = flags};
2712 struct kvec iov;
2713 char *kaddr = kmap(page);
2714 iov.iov_base = kaddr + offset;
2715 iov.iov_len = size;
2716 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2717 kunmap(page);
2718 return res;
2719}
2a91525c 2720EXPORT_SYMBOL(sock_no_sendpage);
1da177e4 2721
306b13eb
TH
2722ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
2723 int offset, size_t size, int flags)
2724{
2725 ssize_t res;
2726 struct msghdr msg = {.msg_flags = flags};
2727 struct kvec iov;
2728 char *kaddr = kmap(page);
2729
2730 iov.iov_base = kaddr + offset;
2731 iov.iov_len = size;
2732 res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
2733 kunmap(page);
2734 return res;
2735}
2736EXPORT_SYMBOL(sock_no_sendpage_locked);
2737
1da177e4
LT
2738/*
2739 * Default Socket Callbacks
2740 */
2741
2742static void sock_def_wakeup(struct sock *sk)
2743{
43815482
ED
2744 struct socket_wq *wq;
2745
2746 rcu_read_lock();
2747 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 2748 if (skwq_has_sleeper(wq))
43815482
ED
2749 wake_up_interruptible_all(&wq->wait);
2750 rcu_read_unlock();
1da177e4
LT
2751}
2752
2753static void sock_def_error_report(struct sock *sk)
2754{
43815482
ED
2755 struct socket_wq *wq;
2756
2757 rcu_read_lock();
2758 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 2759 if (skwq_has_sleeper(wq))
a9a08845 2760 wake_up_interruptible_poll(&wq->wait, EPOLLERR);
8d8ad9d7 2761 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
43815482 2762 rcu_read_unlock();
1da177e4
LT
2763}
2764
676d2369 2765static void sock_def_readable(struct sock *sk)
1da177e4 2766{
43815482
ED
2767 struct socket_wq *wq;
2768
2769 rcu_read_lock();
2770 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 2771 if (skwq_has_sleeper(wq))
a9a08845
LT
2772 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
2773 EPOLLRDNORM | EPOLLRDBAND);
8d8ad9d7 2774 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
43815482 2775 rcu_read_unlock();
1da177e4
LT
2776}
2777
2778static void sock_def_write_space(struct sock *sk)
2779{
43815482
ED
2780 struct socket_wq *wq;
2781
2782 rcu_read_lock();
1da177e4
LT
2783
2784 /* Do not wake up a writer until he can make "significant"
2785 * progress. --DaveM
2786 */
14afee4b 2787 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
43815482 2788 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 2789 if (skwq_has_sleeper(wq))
a9a08845
LT
2790 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2791 EPOLLWRNORM | EPOLLWRBAND);
1da177e4
LT
2792
2793 /* Should agree with poll, otherwise some programs break */
2794 if (sock_writeable(sk))
8d8ad9d7 2795 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1da177e4
LT
2796 }
2797
43815482 2798 rcu_read_unlock();
1da177e4
LT
2799}
2800
2801static void sock_def_destruct(struct sock *sk)
2802{
1da177e4
LT
2803}
2804
2805void sk_send_sigurg(struct sock *sk)
2806{
2807 if (sk->sk_socket && sk->sk_socket->file)
2808 if (send_sigurg(&sk->sk_socket->file->f_owner))
8d8ad9d7 2809 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
1da177e4 2810}
2a91525c 2811EXPORT_SYMBOL(sk_send_sigurg);
1da177e4
LT
2812
2813void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2814 unsigned long expires)
2815{
2816 if (!mod_timer(timer, expires))
2817 sock_hold(sk);
2818}
1da177e4
LT
2819EXPORT_SYMBOL(sk_reset_timer);
2820
2821void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2822{
25cc4ae9 2823 if (del_timer(timer))
1da177e4
LT
2824 __sock_put(sk);
2825}
1da177e4
LT
2826EXPORT_SYMBOL(sk_stop_timer);
2827
2828void sock_init_data(struct socket *sock, struct sock *sk)
2829{
581319c5 2830 sk_init_common(sk);
1da177e4
LT
2831 sk->sk_send_head = NULL;
2832
99767f27 2833 timer_setup(&sk->sk_timer, NULL, 0);
4ec93edb 2834
1da177e4
LT
2835 sk->sk_allocation = GFP_KERNEL;
2836 sk->sk_rcvbuf = sysctl_rmem_default;
2837 sk->sk_sndbuf = sysctl_wmem_default;
2838 sk->sk_state = TCP_CLOSE;
972692e0 2839 sk_set_socket(sk, sock);
1da177e4
LT
2840
2841 sock_set_flag(sk, SOCK_ZAPPED);
2842
e71a4783 2843 if (sock) {
1da177e4 2844 sk->sk_type = sock->type;
c2f26e8f 2845 RCU_INIT_POINTER(sk->sk_wq, sock->wq);
1da177e4 2846 sock->sk = sk;
86741ec2
LC
2847 sk->sk_uid = SOCK_INODE(sock)->i_uid;
2848 } else {
c2f26e8f 2849 RCU_INIT_POINTER(sk->sk_wq, NULL);
86741ec2
LC
2850 sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
2851 }
1da177e4 2852
1da177e4 2853 rwlock_init(&sk->sk_callback_lock);
cdfbabfb
DH
2854 if (sk->sk_kern_sock)
2855 lockdep_set_class_and_name(
2856 &sk->sk_callback_lock,
2857 af_kern_callback_keys + sk->sk_family,
2858 af_family_kern_clock_key_strings[sk->sk_family]);
2859 else
2860 lockdep_set_class_and_name(
2861 &sk->sk_callback_lock,
443aef0e
PZ
2862 af_callback_keys + sk->sk_family,
2863 af_family_clock_key_strings[sk->sk_family]);
1da177e4
LT
2864
2865 sk->sk_state_change = sock_def_wakeup;
2866 sk->sk_data_ready = sock_def_readable;
2867 sk->sk_write_space = sock_def_write_space;
2868 sk->sk_error_report = sock_def_error_report;
2869 sk->sk_destruct = sock_def_destruct;
2870
5640f768
ED
2871 sk->sk_frag.page = NULL;
2872 sk->sk_frag.offset = 0;
ef64a54f 2873 sk->sk_peek_off = -1;
1da177e4 2874
109f6e39
EB
2875 sk->sk_peer_pid = NULL;
2876 sk->sk_peer_cred = NULL;
1da177e4
LT
2877 sk->sk_write_pending = 0;
2878 sk->sk_rcvlowat = 1;
2879 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2880 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2881
6c7c98ba 2882 sk->sk_stamp = SK_DEFAULT_STAMP;
3a0ed3e9
DD
2883#if BITS_PER_LONG==32
2884 seqlock_init(&sk->sk_stamp_seq);
2885#endif
52267790 2886 atomic_set(&sk->sk_zckey, 0);
1da177e4 2887
e0d1095a 2888#ifdef CONFIG_NET_RX_BUSY_POLL
06021292 2889 sk->sk_napi_id = 0;
64b0dc51 2890 sk->sk_ll_usec = sysctl_net_busy_read;
06021292
ET
2891#endif
2892
76a9ebe8
ED
2893 sk->sk_max_pacing_rate = ~0UL;
2894 sk->sk_pacing_rate = ~0UL;
3a9b76fd 2895 sk->sk_pacing_shift = 10;
70da268b 2896 sk->sk_incoming_cpu = -1;
c6345ce7
AN
2897
2898 sk_rx_queue_clear(sk);
4dc6dc71
ED
2899 /*
2900 * Before updating sk_refcnt, we must commit prior changes to memory
2901 * (Documentation/RCU/rculist_nulls.txt for details)
2902 */
2903 smp_wmb();
41c6d650 2904 refcount_set(&sk->sk_refcnt, 1);
33c732c3 2905 atomic_set(&sk->sk_drops, 0);
1da177e4 2906}
2a91525c 2907EXPORT_SYMBOL(sock_init_data);
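A sketch of where sock_init_data() sits in a protocol's create hook: allocate the sock, let sock_init_data() wire up queues and default callbacks, then override what the protocol needs. my_proto, my_proto_ops and my_proto_destruct are hypothetical; the family is only an example.

static int my_proto_create(struct net *net, struct socket *sock,
			   int protocol, int kern)
{
	struct sock *sk;

	sock->ops = &my_proto_ops;
	sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &my_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock_init_data(sock, sk);	/* queues, callbacks, default buffers */
	sk->sk_destruct = my_proto_destruct;
	return 0;
}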
1da177e4 2908
b5606c2d 2909void lock_sock_nested(struct sock *sk, int subclass)
1da177e4
LT
2910{
2911 might_sleep();
a5b5bb9a 2912 spin_lock_bh(&sk->sk_lock.slock);
d2e9117c 2913 if (sk->sk_lock.owned)
1da177e4 2914 __lock_sock(sk);
d2e9117c 2915 sk->sk_lock.owned = 1;
a5b5bb9a
IM
2916 spin_unlock(&sk->sk_lock.slock);
2917 /*
2918 * The sk_lock has mutex_lock() semantics here:
2919 */
fcc70d5f 2920 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
a5b5bb9a 2921 local_bh_enable();
1da177e4 2922}
fcc70d5f 2923EXPORT_SYMBOL(lock_sock_nested);
1da177e4 2924
b5606c2d 2925void release_sock(struct sock *sk)
1da177e4 2926{
a5b5bb9a 2927 spin_lock_bh(&sk->sk_lock.slock);
1da177e4
LT
2928 if (sk->sk_backlog.tail)
2929 __release_sock(sk);
46d3ceab 2930
c3f9b018
ED
2931 /* Warning : release_cb() might need to release sk ownership,
2932 * i.e. call sock_release_ownership(sk) before us.
2933 */
46d3ceab
ED
2934 if (sk->sk_prot->release_cb)
2935 sk->sk_prot->release_cb(sk);
2936
c3f9b018 2937 sock_release_ownership(sk);
a5b5bb9a
IM
2938 if (waitqueue_active(&sk->sk_lock.wq))
2939 wake_up(&sk->sk_lock.wq);
2940 spin_unlock_bh(&sk->sk_lock.slock);
1da177e4
LT
2941}
2942EXPORT_SYMBOL(release_sock);
2943
8a74ad60
ED
2944/**
2945 * lock_sock_fast - fast version of lock_sock
2946 * @sk: socket
2947 *
2948 * This version should be used for very small sections, where the process won't block
d651983d
MCC
2949 * return false if fast path is taken:
2950 *
8a74ad60 2951 * sk_lock.slock locked, owned = 0, BH disabled
d651983d
MCC
2952 *
2953 * return true if slow path is taken:
2954 *
8a74ad60
ED
2955 * sk_lock.slock unlocked, owned = 1, BH enabled
2956 */
2957bool lock_sock_fast(struct sock *sk)
2958{
2959 might_sleep();
2960 spin_lock_bh(&sk->sk_lock.slock);
2961
2962 if (!sk->sk_lock.owned)
2963 /*
2964 * Note : We must disable BH
2965 */
2966 return false;
2967
2968 __lock_sock(sk);
2969 sk->sk_lock.owned = 1;
2970 spin_unlock(&sk->sk_lock.slock);
2971 /*
2972 * The sk_lock has mutex_lock() semantics here:
2973 */
2974 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2975 local_bh_enable();
2976 return true;
2977}
2978EXPORT_SYMBOL(lock_sock_fast);
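The boolean result must be threaded through to unlock_sock_fast(), which picks spin_unlock_bh() or release_sock() to match whichever path was taken. A minimal sketch for a short read-mostly section; my_proto_queue_len() is hypothetical.

static unsigned int my_proto_queue_len(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);	/* usually just slock + BH off */
	unsigned int len = skb_queue_len(&sk->sk_receive_queue);

	unlock_sock_fast(sk, slow);	/* undoes whichever path was taken */
	return len;
}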
2979
c7cbdbf2
AB
2980int sock_gettstamp(struct socket *sock, void __user *userstamp,
2981 bool timeval, bool time32)
4ec93edb 2982{
c7cbdbf2
AB
2983 struct sock *sk = sock->sk;
2984 struct timespec64 ts;
9dae3497
YS
2985
2986 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
c7cbdbf2
AB
2987 ts = ktime_to_timespec64(sock_read_timestamp(sk));
2988 if (ts.tv_sec == -1)
1da177e4 2989 return -ENOENT;
c7cbdbf2 2990 if (ts.tv_sec == 0) {
3a0ed3e9 2991 ktime_t kt = ktime_get_real();
c7cbdbf2
AB
2992 sock_write_timestamp(sk, kt);
2993 ts = ktime_to_timespec64(kt);
b7aa0bf7 2994 }
1da177e4 2995
c7cbdbf2
AB
2996 if (timeval)
2997 ts.tv_nsec /= 1000;
9dae3497 2998
c7cbdbf2
AB
2999#ifdef CONFIG_COMPAT_32BIT_TIME
3000 if (time32)
3001 return put_old_timespec32(&ts, userstamp);
3002#endif
3003#ifdef CONFIG_SPARC64
3004 /* beware of padding in sparc64 timeval */
3005 if (timeval && !in_compat_syscall()) {
3006 struct __kernel_old_timeval __user tv = {
3007 .tv_sec = ts.tv_sec,
3008 .tv_usec = ts.tv_nsec,
3009 };
3010 if (copy_to_user(userstamp, &tv, sizeof(tv)))
3011 return -EFAULT;
3012 return 0;
ae40eb1e 3013 }
c7cbdbf2
AB
3014#endif
3015 return put_timespec64(&ts, userstamp);
ae40eb1e 3016}
c7cbdbf2 3017EXPORT_SYMBOL(sock_gettstamp);
ae40eb1e 3018
20d49473 3019void sock_enable_timestamp(struct sock *sk, int flag)
4ec93edb 3020{
20d49473 3021 if (!sock_flag(sk, flag)) {
08e29af3
ED
3022 unsigned long previous_flags = sk->sk_flags;
3023
20d49473
PO
3024 sock_set_flag(sk, flag);
3025 /*
3026 * we just set one of the two flags which require net
3027 * time stamping, but time stamping might have been on
3028 * already because of the other one
3029 */
080a270f
HFS
3030 if (sock_needs_netstamp(sk) &&
3031 !(previous_flags & SK_FLAGS_TIMESTAMP))
20d49473 3032 net_enable_timestamp();
1da177e4
LT
3033 }
3034}
1da177e4 3035
cb820f8e
RC
3036int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3037 int level, int type)
3038{
3039 struct sock_exterr_skb *serr;
364a9e93 3040 struct sk_buff *skb;
cb820f8e
RC
3041 int copied, err;
3042
3043 err = -EAGAIN;
364a9e93 3044 skb = sock_dequeue_err_skb(sk);
cb820f8e
RC
3045 if (skb == NULL)
3046 goto out;
3047
3048 copied = skb->len;
3049 if (copied > len) {
3050 msg->msg_flags |= MSG_TRUNC;
3051 copied = len;
3052 }
51f3d02b 3053 err = skb_copy_datagram_msg(skb, 0, msg, copied);
cb820f8e
RC
3054 if (err)
3055 goto out_free_skb;
3056
3057 sock_recv_timestamp(msg, sk, skb);
3058
3059 serr = SKB_EXT_ERR(skb);
3060 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3061
3062 msg->msg_flags |= MSG_ERRQUEUE;
3063 err = copied;
3064
cb820f8e
RC
3065out_free_skb:
3066 kfree_skb(skb);
3067out:
3068 return err;
3069}
3070EXPORT_SYMBOL(sock_recv_errqueue);
3071
1da177e4
LT
3072/*
3073 * Get a socket option on a socket.
3074 *
3075 * FIX: POSIX 1003.1g is very ambiguous here. It states that
3076 * asynchronous errors should be reported by getsockopt. We assume
3077 * this means if you specify SO_ERROR (otherwise what's the point of it).
3078 */
3079int sock_common_getsockopt(struct socket *sock, int level, int optname,
3080 char __user *optval, int __user *optlen)
3081{
3082 struct sock *sk = sock->sk;
3083
3084 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3085}
1da177e4
LT
3086EXPORT_SYMBOL(sock_common_getsockopt);
3087
3fdadf7d 3088#ifdef CONFIG_COMPAT
543d9cfe
ACM
3089int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
3090 char __user *optval, int __user *optlen)
3fdadf7d
DM
3091{
3092 struct sock *sk = sock->sk;
3093
1e51f951 3094 if (sk->sk_prot->compat_getsockopt != NULL)
543d9cfe
ACM
3095 return sk->sk_prot->compat_getsockopt(sk, level, optname,
3096 optval, optlen);
3fdadf7d
DM
3097 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3098}
3099EXPORT_SYMBOL(compat_sock_common_getsockopt);
3100#endif
3101
1b784140
YX
3102int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3103 int flags)
1da177e4
LT
3104{
3105 struct sock *sk = sock->sk;
3106 int addr_len = 0;
3107 int err;
3108
1b784140 3109 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
1da177e4
LT
3110 flags & ~MSG_DONTWAIT, &addr_len);
3111 if (err >= 0)
3112 msg->msg_namelen = addr_len;
3113 return err;
3114}
1da177e4
LT
3115EXPORT_SYMBOL(sock_common_recvmsg);
3116
3117/*
3118 * Set socket options on an inet socket.
3119 */
3120int sock_common_setsockopt(struct socket *sock, int level, int optname,
b7058842 3121 char __user *optval, unsigned int optlen)
1da177e4
LT
3122{
3123 struct sock *sk = sock->sk;
3124
3125 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3126}
1da177e4
LT
3127EXPORT_SYMBOL(sock_common_setsockopt);
3128
3fdadf7d 3129#ifdef CONFIG_COMPAT
543d9cfe 3130int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
b7058842 3131 char __user *optval, unsigned int optlen)
3fdadf7d
DM
3132{
3133 struct sock *sk = sock->sk;
3134
543d9cfe
ACM
3135 if (sk->sk_prot->compat_setsockopt != NULL)
3136 return sk->sk_prot->compat_setsockopt(sk, level, optname,
3137 optval, optlen);
3fdadf7d
DM
3138 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3139}
3140EXPORT_SYMBOL(compat_sock_common_setsockopt);
3141#endif
3142
1da177e4
LT
3143void sk_common_release(struct sock *sk)
3144{
3145 if (sk->sk_prot->destroy)
3146 sk->sk_prot->destroy(sk);
3147
3148 /*
3149 * Observation: when sk_common_release is called, processes have
3150 * no access to the socket, but the net layer still does.
3151 * Step one, detach it from networking:
3152 *
3153 * A. Remove from hash tables.
3154 */
3155
3156 sk->sk_prot->unhash(sk);
3157
3158 /*
3159 * At this point the socket cannot receive new packets, but it is possible
3160 * that some packets are in flight because some CPU runs the receiver and
3161 * did a hash table lookup before we unhashed the socket. They will reach
3162 * the receive queue and be purged by the socket destructor.
3163 *
3164 * Also we still have packets pending on the receive queue and probably
3165 * our own packets waiting in device queues. sock_destroy will drain the
3166 * receive queue, but transmitted packets will delay socket destruction
3167 * until the last reference is released.
3168 */
3169
3170 sock_orphan(sk);
3171
3172 xfrm_sk_free_policy(sk);
3173
e6848976 3174 sk_refcnt_debug_release(sk);
5640f768 3175
1da177e4
LT
3176 sock_put(sk);
3177}
1da177e4
LT
3178EXPORT_SYMBOL(sk_common_release);
3179
a2d133b1
JH
3180void sk_get_meminfo(const struct sock *sk, u32 *mem)
3181{
3182 memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3183
3184 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3185 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
3186 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3187 mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
3188 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
3189 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
3190 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3191 mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
3192 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3193}
3194
13ff3d6f
PE
3195#ifdef CONFIG_PROC_FS
3196#define PROTO_INUSE_NR 64 /* should be enough for the first time */
1338d466
PE
3197struct prot_inuse {
3198 int val[PROTO_INUSE_NR];
3199};
13ff3d6f
PE
3200
3201static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
70ee1159 3202
70ee1159
PE
3203void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
3204{
08fc7f81 3205 __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
70ee1159
PE
3206}
3207EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
3208
3209int sock_prot_inuse_get(struct net *net, struct proto *prot)
3210{
3211 int cpu, idx = prot->inuse_idx;
3212 int res = 0;
3213
3214 for_each_possible_cpu(cpu)
08fc7f81 3215 res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
70ee1159
PE
3216
3217 return res >= 0 ? res : 0;
3218}
3219EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
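Protocols feed this counter from their hash/unhash callbacks, and the read side sums the per-cpu values, which is why a transiently negative sum is clamped to 0. A producer-side sketch in the style of the inet hash code; the my_proto_* names are hypothetical.

static void my_proto_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void my_proto_unhash(struct sock *sk)
{
	/* ... remove sk from the lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}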
3220
648845ab
TZ
3221static void sock_inuse_add(struct net *net, int val)
3222{
3223 this_cpu_add(*net->core.sock_inuse, val);
3224}
3225
3226int sock_inuse_get(struct net *net)
3227{
3228 int cpu, res = 0;
3229
3230 for_each_possible_cpu(cpu)
3231 res += *per_cpu_ptr(net->core.sock_inuse, cpu);
3232
3233 return res;
3234}
3235
3236EXPORT_SYMBOL_GPL(sock_inuse_get);
3237
2c8c1e72 3238static int __net_init sock_inuse_init_net(struct net *net)
70ee1159 3239{
08fc7f81 3240 net->core.prot_inuse = alloc_percpu(struct prot_inuse);
648845ab
TZ
3241 if (net->core.prot_inuse == NULL)
3242 return -ENOMEM;
3243
3244 net->core.sock_inuse = alloc_percpu(int);
3245 if (net->core.sock_inuse == NULL)
3246 goto out;
3247
3248 return 0;
3249
3250out:
3251 free_percpu(net->core.prot_inuse);
3252 return -ENOMEM;
70ee1159
PE
3253}
3254
2c8c1e72 3255static void __net_exit sock_inuse_exit_net(struct net *net)
70ee1159 3256{
08fc7f81 3257 free_percpu(net->core.prot_inuse);
648845ab 3258 free_percpu(net->core.sock_inuse);
70ee1159
PE
3259}
3260
3261static struct pernet_operations net_inuse_ops = {
3262 .init = sock_inuse_init_net,
3263 .exit = sock_inuse_exit_net,
3264};
3265
3266static __init int net_inuse_init(void)
3267{
3268 if (register_pernet_subsys(&net_inuse_ops))
3269 panic("Cannot initialize net inuse counters");
3270
3271 return 0;
3272}
3273
3274core_initcall(net_inuse_init);
13ff3d6f
PE
3275
3276static void assign_proto_idx(struct proto *prot)
3277{
3278 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3279
3280 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
e005d193 3281 pr_err("PROTO_INUSE_NR exhausted\n");
13ff3d6f
PE
3282 return;
3283 }
3284
3285 set_bit(prot->inuse_idx, proto_inuse_idx);
3286}
3287
3288static void release_proto_idx(struct proto *prot)
3289{
3290 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3291 clear_bit(prot->inuse_idx, proto_inuse_idx);
3292}
3293#else
3294static inline void assign_proto_idx(struct proto *prot)
3295{
3296}
3297
3298static inline void release_proto_idx(struct proto *prot)
3299{
3300}
648845ab
TZ
3301
3302static void sock_inuse_add(struct net *net, int val)
3303{
3304}
13ff3d6f
PE
3305#endif
3306
0159dfd3
ED
3307static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3308{
3309 if (!rsk_prot)
3310 return;
3311 kfree(rsk_prot->slab_name);
3312 rsk_prot->slab_name = NULL;
adf78eda
JL
3313 kmem_cache_destroy(rsk_prot->slab);
3314 rsk_prot->slab = NULL;
0159dfd3
ED
3315}
3316
3317static int req_prot_init(const struct proto *prot)
3318{
3319 struct request_sock_ops *rsk_prot = prot->rsk_prot;
3320
3321 if (!rsk_prot)
3322 return 0;
3323
3324 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3325 prot->name);
3326 if (!rsk_prot->slab_name)
3327 return -ENOMEM;
3328
3329 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3330 rsk_prot->obj_size, 0,
e699e2c6
SB
3331 SLAB_ACCOUNT | prot->slab_flags,
3332 NULL);
0159dfd3
ED
3333
3334 if (!rsk_prot->slab) {
3335 pr_crit("%s: Can't create request sock SLAB cache!\n",
3336 prot->name);
3337 return -ENOMEM;
3338 }
3339 return 0;
3340}
3341
b733c007
PE
3342int proto_register(struct proto *prot, int alloc_slab)
3343{
1da177e4 3344 if (alloc_slab) {
30c2c9f1
DW
3345 prot->slab = kmem_cache_create_usercopy(prot->name,
3346 prot->obj_size, 0,
e699e2c6
SB
3347 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
3348 prot->slab_flags,
289a4860 3349 prot->useroffset, prot->usersize,
271b72c7 3350 NULL);
1da177e4
LT
3351
3352 if (prot->slab == NULL) {
e005d193
JP
3353 pr_crit("%s: Can't create sock SLAB cache!\n",
3354 prot->name);
60e7663d 3355 goto out;
1da177e4 3356 }
2e6599cb 3357
0159dfd3
ED
3358 if (req_prot_init(prot))
3359 goto out_free_request_sock_slab;
8feaf0c0 3360
6d6ee43e 3361 if (prot->twsk_prot != NULL) {
faf23422 3362 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
8feaf0c0 3363
7e56b5d6 3364 if (prot->twsk_prot->twsk_slab_name == NULL)
8feaf0c0
ACM
3365 goto out_free_request_sock_slab;
3366
6d6ee43e 3367 prot->twsk_prot->twsk_slab =
7e56b5d6 3368 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
6d6ee43e 3369 prot->twsk_prot->twsk_obj_size,
3ab5aee7 3370 0,
e699e2c6 3371 SLAB_ACCOUNT |
52db70dc 3372 prot->slab_flags,
20c2df83 3373 NULL);
6d6ee43e 3374 if (prot->twsk_prot->twsk_slab == NULL)
8feaf0c0
ACM
3375 goto out_free_timewait_sock_slab_name;
3376 }
1da177e4
LT
3377 }
3378
36b77a52 3379 mutex_lock(&proto_list_mutex);
1da177e4 3380 list_add(&prot->node, &proto_list);
13ff3d6f 3381 assign_proto_idx(prot);
36b77a52 3382 mutex_unlock(&proto_list_mutex);
b733c007
PE
3383 return 0;
3384
8feaf0c0 3385out_free_timewait_sock_slab_name:
7e56b5d6 3386 kfree(prot->twsk_prot->twsk_slab_name);
8feaf0c0 3387out_free_request_sock_slab:
0159dfd3
ED
3388 req_prot_cleanup(prot->rsk_prot);
3389
2e6599cb
ACM
3390 kmem_cache_destroy(prot->slab);
3391 prot->slab = NULL;
b733c007
PE
3392out:
3393 return -ENOBUFS;
1da177e4 3394}
1da177e4
LT
3395EXPORT_SYMBOL(proto_register);
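A module init/exit sketch: register the proto (with its own slab) before registering the socket family that uses it, and tear down in reverse order. my_proto is hypothetical; obj_size would normally cover a protocol-private structure embedding struct sock.

static struct proto my_proto = {
	.name	  = "MYPROTO",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),	/* normally sizeof(struct my_sock) */
};

static int __init my_proto_module_init(void)
{
	return proto_register(&my_proto, 1);	/* 1: create a slab cache */
}

static void __exit my_proto_module_exit(void)
{
	proto_unregister(&my_proto);
}

module_init(my_proto_module_init);
module_exit(my_proto_module_exit);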
3396
3397void proto_unregister(struct proto *prot)
3398{
36b77a52 3399 mutex_lock(&proto_list_mutex);
13ff3d6f 3400 release_proto_idx(prot);
0a3f4358 3401 list_del(&prot->node);
36b77a52 3402 mutex_unlock(&proto_list_mutex);
1da177e4 3403
adf78eda
JL
3404 kmem_cache_destroy(prot->slab);
3405 prot->slab = NULL;
1da177e4 3406
0159dfd3 3407 req_prot_cleanup(prot->rsk_prot);
2e6599cb 3408
6d6ee43e 3409 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
6d6ee43e 3410 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
7e56b5d6 3411 kfree(prot->twsk_prot->twsk_slab_name);
6d6ee43e 3412 prot->twsk_prot->twsk_slab = NULL;
8feaf0c0 3413 }
1da177e4 3414}
1da177e4
LT
3415EXPORT_SYMBOL(proto_unregister);
3416
bf2ae2e4
XL
3417int sock_load_diag_module(int family, int protocol)
3418{
3419 if (!protocol) {
3420 if (!sock_is_registered(family))
3421 return -ENOENT;
3422
3423 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3424 NETLINK_SOCK_DIAG, family);
3425 }
3426
3427#ifdef CONFIG_INET
3428 if (family == AF_INET &&
c34c1287 3429 protocol != IPPROTO_RAW &&
bf2ae2e4
XL
3430 !rcu_access_pointer(inet_protos[protocol]))
3431 return -ENOENT;
3432#endif
3433
3434 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3435 NETLINK_SOCK_DIAG, family, protocol);
3436}
3437EXPORT_SYMBOL(sock_load_diag_module);
3438
1da177e4 3439#ifdef CONFIG_PROC_FS
1da177e4 3440static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
36b77a52 3441 __acquires(proto_list_mutex)
1da177e4 3442{
36b77a52 3443 mutex_lock(&proto_list_mutex);
60f0438a 3444 return seq_list_start_head(&proto_list, *pos);
1da177e4
LT
3445}
3446
3447static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3448{
60f0438a 3449 return seq_list_next(v, &proto_list, pos);
1da177e4
LT
3450}
3451
3452static void proto_seq_stop(struct seq_file *seq, void *v)
36b77a52 3453 __releases(proto_list_mutex)
1da177e4 3454{
36b77a52 3455 mutex_unlock(&proto_list_mutex);
1da177e4
LT
3456}
3457
3458static char proto_method_implemented(const void *method)
3459{
3460 return method == NULL ? 'n' : 'y';
3461}
180d8cd9
GC
3462static long sock_prot_memory_allocated(struct proto *proto)
3463{
cb75a36c 3464 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
180d8cd9
GC
3465}
3466
3467static char *sock_prot_memory_pressure(struct proto *proto)
3468{
3469 return proto->memory_pressure != NULL ?
3470 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3471}
1da177e4
LT
3472
3473static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3474{
180d8cd9 3475
8d987e5c 3476 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
1da177e4
LT
3477 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3478 proto->name,
3479 proto->obj_size,
14e943db 3480 sock_prot_inuse_get(seq_file_net(seq), proto),
180d8cd9
GC
3481 sock_prot_memory_allocated(proto),
3482 sock_prot_memory_pressure(proto),
1da177e4
LT
3483 proto->max_header,
3484 proto->slab == NULL ? "no" : "yes",
3485 module_name(proto->owner),
3486 proto_method_implemented(proto->close),
3487 proto_method_implemented(proto->connect),
3488 proto_method_implemented(proto->disconnect),
3489 proto_method_implemented(proto->accept),
3490 proto_method_implemented(proto->ioctl),
3491 proto_method_implemented(proto->init),
3492 proto_method_implemented(proto->destroy),
3493 proto_method_implemented(proto->shutdown),
3494 proto_method_implemented(proto->setsockopt),
3495 proto_method_implemented(proto->getsockopt),
3496 proto_method_implemented(proto->sendmsg),
3497 proto_method_implemented(proto->recvmsg),
3498 proto_method_implemented(proto->sendpage),
3499 proto_method_implemented(proto->bind),
3500 proto_method_implemented(proto->backlog_rcv),
3501 proto_method_implemented(proto->hash),
3502 proto_method_implemented(proto->unhash),
3503 proto_method_implemented(proto->get_port),
3504 proto_method_implemented(proto->enter_memory_pressure));
3505}
3506
3507static int proto_seq_show(struct seq_file *seq, void *v)
3508{
60f0438a 3509 if (v == &proto_list)
1da177e4
LT
3510 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3511 "protocol",
3512 "size",
3513 "sockets",
3514 "memory",
3515 "press",
3516 "maxhdr",
3517 "slab",
3518 "module",
3519 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3520 else
60f0438a 3521 proto_seq_printf(seq, list_entry(v, struct proto, node));
1da177e4
LT
3522 return 0;
3523}
3524
f690808e 3525static const struct seq_operations proto_seq_ops = {
1da177e4
LT
3526 .start = proto_seq_start,
3527 .next = proto_seq_next,
3528 .stop = proto_seq_stop,
3529 .show = proto_seq_show,
3530};
3531
14e943db
ED
3532static __net_init int proto_init_net(struct net *net)
3533{
c3506372
CH
3534 if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
3535 sizeof(struct seq_net_private)))
14e943db
ED
3536 return -ENOMEM;
3537
3538 return 0;
3539}
3540
3541static __net_exit void proto_exit_net(struct net *net)
3542{
ece31ffd 3543 remove_proc_entry("protocols", net->proc_net);
14e943db
ED
3544}
3545
3546
3547static __net_initdata struct pernet_operations proto_net_ops = {
3548 .init = proto_init_net,
3549 .exit = proto_exit_net,
1da177e4
LT
3550};
3551
3552static int __init proto_init(void)
3553{
14e943db 3554 return register_pernet_subsys(&proto_net_ops);
1da177e4
LT
3555}
3556
3557subsys_initcall(proto_init);
3558
3559#endif /* PROC_FS */
7db6b048
SS
3560
3561#ifdef CONFIG_NET_RX_BUSY_POLL
3562bool sk_busy_loop_end(void *p, unsigned long start_time)
3563{
3564 struct sock *sk = p;
3565
3566 return !skb_queue_empty(&sk->sk_receive_queue) ||
3567 sk_busy_loop_timeout(sk, start_time);
3568}
3569EXPORT_SYMBOL(sk_busy_loop_end);
3570#endif /* CONFIG_NET_RX_BUSY_POLL */